import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the model input names."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features

if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
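
# --- Usage sketch (not part of the original module) ---------------------------
# A concrete task only needs the two readers below; the file layout, label set,
# and CoNLL-style "token label" line format are assumptions for illustration.
class ConllNER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        mode = mode.value if isinstance(mode, Split) else mode
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode}.txt"), encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
                        words, labels = [], []
                else:
                    token, label = line.split()
                    words.append(token)
                    labels.append(label)
        if words:
            examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path):
        # Default CoNLL-2003 label set; a real task would read the labels from `path`.
        return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]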
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCAmelCase : Tuple = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : Optional[int] = NERTransformer(args)
__UpperCAmelCase : Tuple = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__UpperCAmelCase : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # An ErnieM sequence pair has the format: [CLS] A [SEP] [SEP] B [SEP]
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Find the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
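
# Quick worked check of the helper (not part of the original snippet): 4150 and
# 4151 really are fixed points, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125
# + 0 = 4150, and adding 1**5 gives 4151.
assert digits_fifth_powers_sum(4150) == 4150
assert digits_fifth_powers_sum(4151) == 4151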
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`; predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """
        Predict the depth(s) of the image(s) passed as inputs.
        """
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
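
# --- Usage sketch (not part of the original module). Assumes the standard
# `pipeline` factory; the checkpoint name "Intel/dpt-large" is an illustrative
# choice, not something taken from this file. ----------------------------------
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")       # PIL image built in postprocess()
print(result["predicted_depth"].shape)  # raw depth tensor from the model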
from collections.abc import Callable


class Heap:
    """
    A generic Heap class, usable as a min- or max-heap by passing the key function
    accordingly.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if exists else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child-index of given index if exists else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child-index of given index if exists else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index and
        both its children
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in upward direction of given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in downward direction of given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns top item from heap if present"""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns top item from heap and removes it as well, if present"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    # The original exercises Heap through doctests; kept as a stub here.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
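
# --- Usage sketch (not part of the original file): with the default key the
# class behaves as a max-heap on the stored value; negating the key flips it
# into a min-heap. --------------------------------------------------------------
h = Heap()  # max-heap by default
for item, value in [(5, 34), (6, 31), (7, 37)]:
    h.insert_item(item, value)
print(h.get_top())      # [7, 37] -- largest value on top
h.update_item(5, 40)    # reprioritize item 5
print(h.extract_top())  # [5, 40]

min_heap = Heap(key=lambda x: -x)  # same API, smallest value on top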
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
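
# --- Usage sketch (assumption: this is the `datasets` task-template API; the
# dataset and column names below are illustrative, not taken from this file) ----
from datasets import load_dataset
from datasets.tasks import Summarization

ds = load_dataset("xsum", split="train[:100]")  # hypothetical dataset choice
task = Summarization(text_column="document", summary_column="summary")
ds = ds.prepare_for_task(task)  # renames columns via the column_mapping above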
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
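
# Usage sketch (not exercised by the tests above): this scheduler is normally
# swapped into an existing diffusers pipeline via `from_config`. The model id
# below is illustrative and assumes network access.
#
# from diffusers import DiffusionPipeline, DPMSolverSDEScheduler
#
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
# image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]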
| 309 | 1 |
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 369 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
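
# Usage sketch: attaching the template to a dataset renames/aligns its columns
# with the "audio"/"transcription" schema above. The dataset id and its column
# names are illustrative.
#
# from datasets import load_dataset
#
# ds = load_dataset("common_voice", "tr", split="train")
# task = AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")
# ds = ds.prepare_for_task(task)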
| 71 | 0 |
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
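
# Usage sketch outside the test harness (model id and labels as in the slow
# test above; "dog_bark.wav" is a placeholder path, and loading a local file
# requires ffmpeg to be installed):
#
# from transformers import pipeline
#
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# scores = classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])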
| 122 |
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # attribute names reconstructed from context; the original flags were two
    # booleans disabling the generic attention-slicing / offload checks
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])

        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])

        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
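
# Usage sketch mirroring the integration test above (requires network access
# to download the checkpoint and a GPU for reasonable runtimes):
#
# import torch
# from diffusers import DanceDiffusionPipeline
#
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096).audios[0]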
| 122 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
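
# Usage sketch through the public API (override values are illustrative; the
# defaults above correspond to the abeja/gpt-neox-japanese-2.7b checkpoint):
#
# from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel
#
# config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
# model = GPTNeoXJapaneseModel(config)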
| 106 |
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
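
# Usage sketch on a real code dataset (dataset id illustrative; 0.85 is the
# Jaccard similarity threshold exercised above):
#
# from datasets import load_dataset
#
# ds = load_dataset("codeparrot/codeparrot-clean", split="train")
# ds_dedup, clusters = deduplicate_dataset(ds)
# print(f"kept {len(ds_dedup)} of {len(ds)} files across {len(clusters)} duplicate clusters")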
| 106 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
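
# Usage sketch: because of the _LazyModule registration above, importing these
# classes from the top-level package only loads the heavy submodules on first
# access, e.g.:
#
# from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer
#
# tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")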
| 20 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    # BigBird QA module with an extra CLS head for predicting the answer category
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
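
# Wiring sketch showing how the pieces above are expected to fit together.
# Dataset loading, tokenization and the wandb run setup are omitted, and
# `num_train_steps` / `pad_id` are illustrative placeholders.
#
# args = Args()
# model = FlaxBigBirdForNaturalQuestions.from_pretrained(
#     args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks
# )
# tx, lr = build_tx(
#     lr=args.lr,
#     init_lr=args.init_lr,
#     warmup_steps=args.warmup_steps,
#     num_train_steps=10_000,
#     weight_decay=args.weight_decay,
# )
# trainer = Trainer(
#     args=args,
#     data_collator=DataCollator(pad_id=0, max_length=4096),  # use tokenizer.pad_token_id in practice
#     train_step_fn=train_step,
#     val_step_fn=val_step,
#     model_save_fn=model.save_pretrained,
#     logger=wandb,
#     scheduler_fn=lr,
# )
# state = trainer.create_state(model, tx, num_train_steps=10_000)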
| 20 | 1 |
"""simple docstring"""
import argparse
import os
import re
__snake_case = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__snake_case = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
__snake_case = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def __lowerCAmelCase ( lowercase : int , lowercase : bool = False ) -> int:
"""simple docstring"""
with open(lowercase , "r" , encoding="utf-8" ) as f:
snake_case : str = f.read()
snake_case : Any = content.split("\n" )
snake_case : List[Any] = []
snake_case : Dict = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
snake_case : str = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
snake_case : int = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
snake_case : Tuple = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
snake_case : Dict = sorted(lowercase , key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def __lowerCAmelCase ( lowercase : bool = False ) -> Any:
"""simple docstring"""
snake_case : Dict = [os.path.join(lowercase , lowercase ) for f in os.listdir(lowercase ) if f.endswith(".py" )]
snake_case : List[Any] = [sort_auto_mapping(lowercase , overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
snake_case : Tuple = [f for f, d in zip(lowercase , lowercase ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
__snake_case = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
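
# Example invocations from the repository root:
#
#   python utils/sort_auto_mappings.py               # rewrite the auto mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report files that need sorting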
| 112 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def lowerCamelCase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : List[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case : int = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = object_detector(examples[0] , threshold=0.0 )
snake_case : str = len(UpperCamelCase__ )
self.assertGreater(UpperCamelCase__ , 0 )
self.assertEqual(
UpperCamelCase__ , [
{
"score": ANY(UpperCamelCase__ ),
"label": ANY(UpperCamelCase__ ),
"box": {"xmin": ANY(UpperCamelCase__ ), "ymin": ANY(UpperCamelCase__ ), "xmax": ANY(UpperCamelCase__ ), "ymax": ANY(UpperCamelCase__ )},
}
for i in range(UpperCamelCase__ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Dict = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
snake_case : Dict = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Optional[int] = pipeline("zero-shot-object-detection" )
snake_case : Tuple = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
snake_case : List[Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
pass
@require_torch
@slow
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = 0.2
snake_case : List[str] = pipeline("zero-shot-object-detection" )
snake_case : List[Any] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = 2
snake_case : Optional[Any] = pipeline("zero-shot-object-detection" )
snake_case : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 112 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
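
# Usage sketch through the public API (the override below is illustrative;
# the defaults above correspond to the google/realm-* checkpoints listed in
# the archive map):
#
# from transformers import RealmConfig, RealmEmbedder
#
# config = RealmConfig(num_candidates=4)
# embedder = RealmEmbedder(config)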
| 327 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305 | 0 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 294 |
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs don't mix well with byte-level BPE, so this
        # common test is intentionally skipped for CodeGen.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294 | 1 |
"""simple docstring"""
_a = 8.3_144_598
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K")
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_a = 3_00
_a = 28
_a = rms_speed_of_molecule(temperature, molar_mass)
print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 17 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 71 | 0 |
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate OR of the input values.

    >>> or_gate(0, 0)
    0
    >>> or_gate(1, 0)
    1
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 356 |
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_waveform, dtype=np.float32) for one_waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
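
# Hedged usage sketch (not part of the original file): extract padded input values
# for a one-second mono waveform, and log-mel targets for the same waveform.
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")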
| 116 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : Any = logging.getLogger(__name__)
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
__UpperCamelCase : Any = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
__UpperCamelCase : List[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__UpperCamelCase : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__UpperCamelCase : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
__UpperCamelCase : Dict = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
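
    # Hedged usage note (not part of the original file): a typical invocation would be
    #   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
    #       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522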
| 106 |
"""simple docstring"""
import random
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = [], [], []
for element in data:
if element < pivot:
less.append(A_ )
elif element > pivot:
greater.append(A_ )
else:
equal.append(A_ )
return less, equal, greater
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(A_ ) or index < 0:
return None
lowerCAmelCase__ : str = items[random.randint(0 , len(A_ ) - 1 )]
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = _partition(A_ , A_ )
lowerCAmelCase__ : str = len(A_ )
lowerCAmelCase__ : Optional[Any] = len(A_ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(A_ , A_ )
# must be in larger
else:
return quick_select(A_ , index - (m + count) )
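

# Hedged usage sketch (not part of the original file): quick_select returns the
# element that would sit at `index` if `items` were sorted, so the median of an
# odd-length list is quick_select(items, len(items) // 2).
if __name__ == "__main__":
    sample = [7, 2, 9, 4, 1]
    assert quick_select(sample, 2) == sorted(sample)[2]  # median is 4
    print(quick_select(sample, 2))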
| 106 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : int = 'Hello world! cécé herlolip'
def snake_case (A_ :str , A_ :str , A_ :bool ):
'''simple docstring'''
a : int = FairseqRobertaModel.from_pretrained(A_ )
roberta.eval() # disable dropout
a : Union[str, Any] = roberta.model.encoder.sentence_encoder
a : List[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
a : List[str] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , A_ )
a : Optional[Any] = XLMRobertaXLForSequenceClassification(A_ ) if classification_head else XLMRobertaXLForMaskedLM(A_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
a : str = roberta_sent_encoder.embed_tokens.weight
a : Optional[int] = roberta_sent_encoder.embed_positions.weight
a : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
a : Optional[Any] = roberta_sent_encoder.layer_norm.weight
a : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
a : BertLayer = model.roberta.encoder.layer[i]
a : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
a : RobertaAttention = layer.attention
a : List[Any] = roberta_layer.self_attn_layer_norm.weight
a : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
a : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
a : List[Any] = roberta_layer.self_attn.q_proj.weight
a : Optional[int] = roberta_layer.self_attn.q_proj.bias
a : int = roberta_layer.self_attn.k_proj.weight
a : Optional[int] = roberta_layer.self_attn.k_proj.bias
a : str = roberta_layer.self_attn.v_proj.weight
a : str = roberta_layer.self_attn.v_proj.bias
# self-attention output
a : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
a : Dict = roberta_layer.self_attn.out_proj.weight
a : List[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
a : List[Any] = roberta_layer.final_layer_norm.weight
a : Dict = roberta_layer.final_layer_norm.bias
# intermediate
a : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
a : Tuple = roberta_layer.fca.weight
a : Tuple = roberta_layer.fca.bias
# output
a : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
a : Union[str, Any] = roberta_layer.fca.weight
a : Tuple = roberta_layer.fca.bias
# end of layer
if classification_head:
a : List[str] = roberta.model.classification_heads['mnli'].dense.weight
a : List[str] = roberta.model.classification_heads['mnli'].dense.bias
a : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight
a : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
a : int = roberta.model.encoder.lm_head.dense.weight
a : str = roberta.model.encoder.lm_head.dense.bias
a : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
a : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
a : Tuple = roberta.model.encoder.lm_head.weight
a : Tuple = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
a : torch.Tensor = roberta.encode(A_ ).unsqueeze(0 ) # batch of size 1
a : Union[str, Any] = model(A_ )[0]
if classification_head:
a : List[str] = roberta.model.classification_heads['mnli'](roberta.extract_features(A_ ) )
else:
a : List[Any] = roberta.model(A_ )[0]
print(our_output.shape , their_output.shape )
a : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
a : Union[str, Any] = torch.allclose(A_ , A_ , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(A_ ).mkdir(parents=A_ , exist_ok=A_ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_UpperCamelCase : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
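    # Hedged usage note (not part of the original file): a typical invocation might be
    #   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
    #       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
    #       --pytorch_dump_folder_path ./xlm-roberta-xl-converted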
| 357 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Optional[int] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
a : int = AutoTokenizer.from_pretrained('xlm-roberta-base' )
a : int = 'The dog is cute and lives in the garden house'
a : List[Any] = jnp.array([tokenizer.encode(A )] )
a : int = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
a : Dict = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
a : Any = model(A )['last_hidden_state']
self.assertEqual(output.shape , A )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , A , atol=1E-3 ) )
| 186 | 0 |
def climb_stairs(number_of_steps: int) -> int:
    """
    LeetCode No. 70: Climbing Stairs.
    Distinct ways to climb a staircase of `number_of_steps`
    when taking 1 or 2 steps at a time.

    >>> climb_stairs(1)
    1
    >>> climb_stairs(3)
    3
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
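    # Hedged example (not part of the original file): a 4-step staircase can be
    # climbed in 5 distinct ways when taking 1 or 2 steps at a time.
    print(climb_stairs(4))  # 5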
| 112 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCamelCase__ : str = logging.getLogger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Union[str, Any] = '''summarization'''
_A : Optional[Any] = ['''loss''']
_A : Tuple = ROUGE_KEYS
_A : int = '''rouge2'''
def __init__( self : int , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__SCREAMING_SNAKE_CASE : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE : int = Path(self.output_dir ) / """metrics.json"""
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : List[Any] = defaultdict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.config.model_type
__SCREAMING_SNAKE_CASE : List[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__SCREAMING_SNAKE_CASE : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__SCREAMING_SNAKE_CASE : List[Any] = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__SCREAMING_SNAKE_CASE : Any = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__SCREAMING_SNAKE_CASE : Any = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__SCREAMING_SNAKE_CASE : Any = get_git_info()["""repo_sha"""]
__SCREAMING_SNAKE_CASE : Any = hparams.num_workers
__SCREAMING_SNAKE_CASE : Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__SCREAMING_SNAKE_CASE : Any = self.decoder_start_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = self.hparams.eval_max_gen_length
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.max_length
__SCREAMING_SNAKE_CASE : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Dict[str, torch.Tensor] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCAmelCase__ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__SCREAMING_SNAKE_CASE : Optional[int] = True
return readable_batch
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
return self.model(lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
return lmap(str.strip , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.pad_token_id
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = batch["""input_ids"""], batch["""attention_mask"""]
__SCREAMING_SNAKE_CASE : Tuple = batch["""labels"""]
if isinstance(self.model , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = self.model._shift_right(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__SCREAMING_SNAKE_CASE : Tuple = decoder_input_ids
self.save_readable_batch(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__SCREAMING_SNAKE_CASE : Tuple = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = label_smoothed_nll_loss(
lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ )
return (loss,)
@property
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self._step(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = dict(zip(self.loss_names , lowerCAmelCase__ ) )
# tokens per batch
__SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].shape[0]
__SCREAMING_SNAKE_CASE : str = batch["""input_ids"""].eq(self.pad ).sum()
__SCREAMING_SNAKE_CASE : Optional[int] = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ):
"""simple docstring"""
return self._generative_step(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]="val" ):
"""simple docstring"""
self.step_count += 1
__SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__SCREAMING_SNAKE_CASE : List[Any] = losses["""loss"""]
__SCREAMING_SNAKE_CASE : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__SCREAMING_SNAKE_CASE : List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
__SCREAMING_SNAKE_CASE : int = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ):
"""simple docstring"""
return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__SCREAMING_SNAKE_CASE : List[str] = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (time.time() - ta) / batch["""input_ids"""].shape[0]
__SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(batch["""labels"""] )
__SCREAMING_SNAKE_CASE : Optional[Any] = self._step(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = dict(zip(self.loss_names , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = np.mean(lmap(lowerCAmelCase__ , lowerCAmelCase__ ) )
base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ )
return base_metrics
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return self._generative_step(lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
return self.validation_epoch_end(lowerCAmelCase__ , prefix="""test""" )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.n_obs[type_path]
__SCREAMING_SNAKE_CASE : str = self.target_lens[type_path]
__SCREAMING_SNAKE_CASE : str = self.dataset_class(
self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , )
return dataset
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataset(lowerCAmelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE : Optional[int] = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE : Any = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ )
return dataloader
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCAmelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCAmelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument("""--logger_name""" , type=lowerCAmelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCAmelCase__ , default=5_0_0 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCAmelCase__ , default="""summarization""" , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ )
parser.add_argument("""--src_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ )
parser.add_argument("""--tgt_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ )
parser.add_argument("""--eval_beams""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
"""--val_metric""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : List[Any] = '''translation'''
_A : int = ['''loss''']
_A : Union[str, Any] = ['''bleu''']
_A : Dict = '''bleu'''
def __init__( self : Any , lowerCAmelCase__ : int , **lowerCAmelCase__ : Any ):
"""simple docstring"""
super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = hparams.src_lang
__SCREAMING_SNAKE_CASE : Dict = hparams.tgt_lang
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str=None ):
Path(args.output_dir ).mkdir(exist_ok=_lowerCamelCase )
check_output_dir(_lowerCamelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
__SCREAMING_SNAKE_CASE : SummarizationModule = SummarizationModule(_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : SummarizationModule = TranslationModule(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
__SCREAMING_SNAKE_CASE : str = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__SCREAMING_SNAKE_CASE : Any = os.environ.get("""WANDB_PROJECT""" , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = WandbLogger(name=model.output_dir.name , project=_lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__SCREAMING_SNAKE_CASE : Optional[int] = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" )
if args.early_stopping_patience >= 0:
__SCREAMING_SNAKE_CASE : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Dict = args.val_metric == """loss"""
__SCREAMING_SNAKE_CASE : pl.Trainer = generic_train(
_lowerCamelCase , _lowerCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowerCamelCase ) , early_stopping_callback=_lowerCamelCase , logger=_lowerCamelCase , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
__SCREAMING_SNAKE_CASE : Optional[int] = """"""
__SCREAMING_SNAKE_CASE : Any = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=_lowerCamelCase ) )
if checkpoints:
__SCREAMING_SNAKE_CASE : List[Any] = checkpoints[-1]
__SCREAMING_SNAKE_CASE : str = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
UpperCamelCase__ : Dict = pl.Trainer.add_argparse_args(parser)
UpperCamelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCamelCase__ : List[str] = parser.parse_args()
main(args)
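    # Hedged usage note (not part of the original file): a typical invocation of this
    # script might look like (flag names per the argument parsers above):
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./outputs \
    #       --model_name_or_path t5-small --gpus 1 --do_train --do_predict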
| 112 | 1 |
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Initialize the tree from a list in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        """Recover the underlying array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Prefix sum of the first `right` elements (exclusive) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum over the half-open range [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose prefix sum does not exceed `value`, or -1; O(log n)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
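
    # Hedged usage sketch (not part of the original file): build a tree over
    # [1, 2, 3, 4, 5], then check a prefix sum, a range query and a point update.
    fenwick = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert fenwick.prefix(3) == 1 + 2 + 3
    assert fenwick.query(1, 4) == 2 + 3 + 4
    fenwick.update(2, 10)  # array becomes [1, 2, 10, 4, 5]
    assert fenwick.get(2) == 10
    print(fenwick.get_array())  # [1, 2, 10, 4, 5]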
| 279 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.dummy_uncond_unet
snake_case_ : Optional[Any] = PNDMScheduler()
snake_case_ : Optional[Any] = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Dict = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' ).images
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : str = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' , return_dict=__magic_name__ )[0]
snake_case_ : Any = image[0, -3:, -3:, -1]
snake_case_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : str = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = '''google/ddpm-cifar10-32'''
snake_case_ : Tuple = UNetaDModel.from_pretrained(__magic_name__ )
snake_case_ : Optional[Any] = PNDMScheduler()
snake_case_ : Any = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : Tuple = pndm(generator=__magic_name__ , output_type='''numpy''' ).images
snake_case_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        snake_case_ : str = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
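# A hedged usage sketch distilled from the tests above: pair the CIFAR-10 DDPM
# UNet with a PNDM scheduler and sample one 32x32 image. Running this downloads
# the checkpoint and is slow on CPU.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pipe(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3), matching the assertion in the tests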
| 279 | 1 |
"""simple docstring"""
import os
import sys
import unittest
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_snake_case = os.path.join(git_repo_path, 'src', 'diffusers')
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : List[Any] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(UpperCAmelCase__ , """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_a : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(UpperCAmelCase__ , """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_a : int = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(UpperCAmelCase__ , """torch_and_transformers_and_onnx""" )
def _lowercase ( self : str ) -> Optional[Any]:
_a : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , UpperCAmelCase__ )
self.assertIn("""torch_and_transformers""" , UpperCAmelCase__ )
self.assertIn("""flax_and_transformers""" , UpperCAmelCase__ )
self.assertIn("""torch_and_transformers_and_onnx""" , UpperCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""" , objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""] )
def _lowercase ( self : Any ) -> int:
_a : List[str] = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(UpperCAmelCase__ , """\nCONSTANT = None\n""" )
_a : Tuple = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
UpperCAmelCase__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_a : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_a : List[Any] = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ) -> Any:
_a : List[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_a : Optional[int] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , UpperCAmelCase__ )
| 294 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None:
_a : List[str] = claim_vector
_a : List[Any] = allocated_resources_table
_a : Union[str, Any] = maximum_claim_table
def _lowercase ( self : Tuple ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self : int ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self : List[str] ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]:
return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()}
def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None:
_a : List[Any] = self.__need()
_a : Optional[int] = self.__allocated_resources_table
_a : str = self.__available_resources()
_a : Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
_a : int = False
for each_need in need_list:
_a : Optional[int] = True
for index, need in enumerate(UpperCAmelCase__ ):
if need > available_resources[index]:
_a : List[Any] = False
break
if execution:
_a : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_a : Any = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(UpperCAmelCase__ )
# update available/freed resources stack
_a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowercase ( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
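# A compact, de-obfuscated sketch of the safety check the class above performs
# (Banker's algorithm): repeatedly run any process whose remaining need fits in
# the available pool, releasing its allocation on completion. Function name and
# structure are illustrative; the data is the snippet's own test state.
import numpy as np

def is_safe_state(claim_vector, allocated, maximum):
    need = np.array(maximum) - np.array(allocated)
    available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
    finished = [False] * len(allocated)
    progressed = True
    while progressed:
        progressed = False
        for p, remaining in enumerate(need):
            if not finished[p] and (remaining <= available).all():
                available = available + np.array(allocated[p])  # free its resources
                finished[p] = True
                progressed = True
    return all(finished)

print(
    is_safe_state(
        [8, 5, 9, 7],
        [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
        [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]],
    )
)  # True: the state at the top of this snippet is safe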
| 294 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
def get_matched_characters(snake_case_ :str , snake_case_ :str ) -> str:
__UpperCAmelCase = []
__UpperCAmelCase = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__UpperCAmelCase = int(max(0 , i - limit ) )
__UpperCAmelCase = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(snake_case_ )
__UpperCAmelCase = F'''{_stra[0:_stra.index(snake_case_ )]} {_stra[_stra.index(snake_case_ ) + 1:]}'''
return "".join(snake_case_ )
# matching characters
__UpperCAmelCase = get_matched_characters(snake_case_ , snake_case_ )
__UpperCAmelCase = get_matched_characters(snake_case_ , snake_case_ )
__UpperCAmelCase = len(snake_case_ )
# transposition
__UpperCAmelCase = (
len([(ca, ca) for ca, ca in zip(snake_case_ , snake_case_ ) if ca != ca] ) // 2
)
if not match_count:
__UpperCAmelCase = 0.0
else:
__UpperCAmelCase = (
1
/ 3
* (
match_count / len(snake_case_ )
+ match_count / len(snake_case_ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__UpperCAmelCase = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
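# The same metric restated with conventional variable names (a sketch): the
# transposition count compares the two matched-character strings position by
# position, which the obfuscated names above make hard to see.
def jaro_winkler_sketch(s1: str, s2: str) -> float:
    def matched_characters(a: str, b: str) -> str:
        matched = []
        limit = min(len(a), len(b)) // 2
        for i, ch in enumerate(a):
            left, right = max(0, i - limit), min(i + limit + 1, len(b))
            if ch in b[left:right]:
                matched.append(ch)
                j = b.index(ch)
                b = b[:j] + " " + b[j + 1:]  # consume the matched character
        return "".join(matched)

    m1 = matched_characters(s1, s2)
    m2 = matched_characters(s2, s1)
    match_count = len(m1)
    if match_count == 0:
        return 0.0
    transpositions = sum(c1 != c2 for c1, c2 in zip(m1, m2)) // 2
    jaro = (
        match_count / len(s1)
        + match_count / len(s2)
        + (match_count - transpositions) / match_count
    ) / 3
    prefix_len = 0
    for c1, c2 in zip(s1[:4], s2[:4]):  # common prefix capped at 4 characters
        if c1 != c2:
            break
        prefix_len += 1
    return jaro + 0.1 * prefix_len * (1 - jaro)

print(jaro_winkler_sketch("hello", "world"))  # ~0.4667, matching the call above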
| 86 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self : str , _lowercase : list ):
__UpperCAmelCase = set_counts
__UpperCAmelCase = max(_lowercase )
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = [1] * num_sets
__UpperCAmelCase = list(range(_lowercase ) )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = self.get_parent(_lowercase )
__UpperCAmelCase = self.get_parent(_lowercase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase = 0
__UpperCAmelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase = 0
__UpperCAmelCase = src_parent
__UpperCAmelCase = self.set_counts[src_parent]
__UpperCAmelCase = max(self.max_set , _lowercase )
return True
def a ( self : Optional[Any] , _lowercase : int ):
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
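# A conventional-name sketch of the structure above: union by rank with path
# compression, tracking per-root set sizes and the largest set seen so far.
class DisjointSetSketch:
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def find(self, x):
        if self.parents[x] != x:
            self.parents[x] = self.find(self.parents[x])  # path compression
        return self.parents[x]

    def merge(self, src, dst):
        src, dst = self.find(src), self.find(dst)
        if src == dst:
            return False
        if self.ranks[dst] < self.ranks[src]:
            src, dst = dst, src  # keep the higher-rank root as the new parent
        self.set_counts[dst] += self.set_counts[src]
        self.set_counts[src] = 0
        self.parents[src] = dst
        if self.ranks[dst] == self.ranks[src]:
            self.ranks[dst] += 1
        self.max_set = max(self.max_set, self.set_counts[dst])
        return True


ds = DisjointSetSketch([1, 1, 1, 1])
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(0, 3)
assert ds.max_set == 4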
| 86 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _A (__a , __a ) -> List[Any]:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE_ : List[str] = flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE_ : Dict = torch.permute(__a , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__a ):
# linear layer
SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE_ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def _A (__a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
if "metadata" in layer:
SCREAMING_SNAKE_CASE_ : Tuple = layer.split('''metadata''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ : List[Any] = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE_ : Any = layer.split('''kvstore''' )
SCREAMING_SNAKE_CASE_ : Tuple = ''''''.join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer.split('''/''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = '''/'''.join(split_layer[:-1] )
SCREAMING_SNAKE_CASE_ : str = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE_ : Optional[int] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''file'''
else:
SCREAMING_SNAKE_CASE_ : int = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _A (__a , __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = rename_keys(__a )
SCREAMING_SNAKE_CASE_ : List[str] = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE_ : Optional[Any] = v
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_current_block
torch.save(__a , __a )
def _A (__a , __a , __a , __a , __a = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_file_size_to_int(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : List[Any] = 0
os.makedirs(__a , exist_ok=__a )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
SCREAMING_SNAKE_CASE_ : Dict = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = flatten_dict(__a , sep='''/''' )
SCREAMING_SNAKE_CASE_ : Tuple = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_key_and_tensorstore_dict(
__a , __a , __a )
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE_ : Any = content
else:
SCREAMING_SNAKE_CASE_ : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE_ : List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = rename_base_flax_keys(tuple(key.split('''/''' ) ) , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''/'''.join(__a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(
__a , weights_name.replace('''.bin''' , f'-{len(__a )+1:05d}-of-???.bin' ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
del current_block
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : str = raw_weights.to(getattr(__a , __a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(__a , weights_name.replace('''.bin''' , f'-{len(__a )+1:05d}-of-???.bin' ) )
rename_and_save_block(__a , __a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
for idx, shard in enumerate(__a ):
SCREAMING_SNAKE_CASE_ : List[str] = weights_name.replace(
            '''.bin''' , f'-{idx+1:05d}-of-{len(__a ):05d}.bin' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(__a , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__a , os.path.join(__a , __a ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = shard
for key in shard:
SCREAMING_SNAKE_CASE_ : Optional[Any] = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE_ : Dict = {'''total_size''': total_size}
SCREAMING_SNAKE_CASE_ : List[Any] = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__a , __a ) , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '''\n'''
f.write(__a )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCAmelCase_ : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _A () -> Tuple:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE_ : str = TaTokenizer.from_pretrained('''t5-small''' )
SCREAMING_SNAKE_CASE_ : str = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(__a , return_tensors='''pt''' ).input_ids
SCREAMING_SNAKE_CASE_ : int = model.generate(__a , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
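# A toy sketch of the index file the sharding loop above writes out: a
# `weight_map` from parameter name to shard file plus the total byte size under
# `metadata`. The shard names, parameter names, and size below are illustrative
# only, not real checkpoint data.
import json

shards = {
    "pytorch_model-00001-of-00002.bin": ["encoder.block.0.layer.0.weight"],
    "pytorch_model-00002-of-00002.bin": ["decoder.block.0.layer.0.weight"],
}
index = {
    "metadata": {"total_size": 123_456_789},
    "weight_map": {name: shard for shard, names in shards.items() for name in names},
}
print(json.dumps(index, indent=2, sort_keys=True))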
| 91 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Dict = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = "xmod"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=2, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=("en_XX",), lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : int = vocab_size
A : int = hidden_size
A : str = num_hidden_layers
A : List[str] = num_attention_heads
A : List[str] = hidden_act
A : Dict = intermediate_size
A : Optional[int] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Tuple = type_vocab_size
A : List[Any] = initializer_range
A : str = layer_norm_eps
A : Union[str, Any] = position_embedding_type
A : Any = use_cache
A : int = classifier_dropout
A : int = pre_norm
A : List[str] = adapter_reduction_factor
A : Any = adapter_layer_norm
A : Any = adapter_reuse_layer_norm
A : str = ln_before_adapter
A : Dict = list(lowerCamelCase__ )
A : Union[str, Any] = default_language
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 116 | 0 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Any:
for i in range(0 , UpperCAmelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] ) -> Any:
for i in range(UpperCAmelCase__ , 0 , -1 ):
for _ in range(UpperCAmelCase__ , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(UpperCAmelCase__ ) # upper half
reverse_floyd(UpperCAmelCase__ ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
_lowercase : Dict = 1
while K:
    _lowercase : Optional[int] = int(input("enter the number and see the magic : "))
print()
pretty_print(user_number)
_lowercase : List[str] = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 21 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowercase : int = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : Optional[Any] , **lowercase_ : int ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ : Optional[int] = deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) )
logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ : Tuple = kwargs.pop("""torchscript""" , self.torchscript )
lowercase_ : List[Any] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
lowercase_ : List[Any] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**lowercase_ )
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Trace the models using torchscript'''})
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
UpperCamelCase__ = field(
default='''O1''', metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
}, )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
lowercase_ : Optional[Any] = torch.device("""cpu""" )
lowercase_ : Tuple = 0
elif is_torch_tpu_available():
lowercase_ : Optional[int] = xm.xla_device()
lowercase_ : str = 0
else:
lowercase_ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
lowercase_ : str = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.n_gpu > 0
| 21 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
a__ = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
a__ = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
a__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
a__ = sorted(arg_to_scheduler.keys())
a__ = """{""" + """, """.join(arg_to_scheduler_choices) + """}"""
class snake_case ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="base" , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_SCREAMING_SNAKE_CASE)
_snake_case : List[str] = 0
_snake_case : Tuple = Path(self.hparams.output_dir)
_snake_case : Any = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_snake_case : Optional[int] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
_snake_case : PretrainedConfig = config
_snake_case : Union[str, Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
assert hasattr(self.config , _SCREAMING_SNAKE_CASE), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , _SCREAMING_SNAKE_CASE , getattr(self.hparams , _SCREAMING_SNAKE_CASE))
if tokenizer is None:
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
_snake_case : PreTrainedTokenizer = tokenizer
_snake_case : List[str] = MODEL_MODES[mode]
if model is None:
_snake_case : Any = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path) , config=self.config , cache_dir=_SCREAMING_SNAKE_CASE , )
else:
_snake_case : Dict = model
def UpperCamelCase_ ( self : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case : int = self.model_type.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : List[str]) -> Any:
"""simple docstring"""
_snake_case : List[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
_snake_case : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
_snake_case : Any = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def UpperCamelCase_ ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_snake_case : Tuple = self.model
_snake_case : List[Any] = ['''bias''', '''LayerNorm.weight''']
_snake_case : List[Any] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
_snake_case : Dict = Adafactor(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=_SCREAMING_SNAKE_CASE , relative_step=_SCREAMING_SNAKE_CASE)
else:
_snake_case : List[str] = AdamW(
_SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
_snake_case : int = optimizer
_snake_case : List[str] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
return self.validation_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
return self.validation_end(_SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : Tuple) -> int:
"""simple docstring"""
_snake_case : Tuple = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
_snake_case : Any = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCamelCase_ ( self : str , lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
if stage == "test":
_snake_case : str = len(self.test_dataloader().dataset)
else:
_snake_case : Optional[int] = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
_snake_case : Dict = len(self.train_dataloader().dataset)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple = False) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""")
def UpperCamelCase_ ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
return self.train_loader
def UpperCamelCase_ ( self : Optional[int]) -> str:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : int) -> int:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=_SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : int , lowerCAmelCase : int) -> int:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
_SCREAMING_SNAKE_CASE , list(filter(_SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/"""))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Optional[Any]) -> None:
"""simple docstring"""
_snake_case : Optional[Any] = self.output_dir.joinpath("""best_tfmr""")
_snake_case : List[str] = self.step_count
self.model.save_pretrained(_SCREAMING_SNAKE_CASE)
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=_SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""")
parser.add_argument(
"""--tokenizer_name""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(_SCREAMING_SNAKE_CASE).parent / """test_run""" / """cache""") , type=_SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=_SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=_SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=_SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""")
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=_SCREAMING_SNAKE_CASE , metavar=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=_SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""")
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--warmup_steps""" , default=0 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""")
parser.add_argument("""--num_workers""" , default=4 , type=_SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""")
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("""--train_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("""--eval_batch_size""" , default=32 , type=_SCREAMING_SNAKE_CASE)
parser.add_argument("""--adafactor""" , action="""store_true""")
class snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int) -> int:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_SCREAMING_SNAKE_CASE)
class snake_case ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
_snake_case : List[str] = trainer.lr_schedulers[0]['''scheduler''']
_snake_case : List[str] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""")
_snake_case : str = trainer.callback_metrics
# Log results
for key in sorted(_SCREAMING_SNAKE_CASE):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
rank_zero_info("""***** Test results *****""")
_snake_case : Optional[Any] = trainer.callback_metrics
# Log and save results to file
_snake_case : Optional[int] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""")
with open(_SCREAMING_SNAKE_CASE , """w""") as writer:
for key in sorted(_SCREAMING_SNAKE_CASE):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
writer.write("""{} = {}\n""".format(_SCREAMING_SNAKE_CASE , str(metrics[key])))
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"""--output_dir""" , default=str(Path(SCREAMING_SNAKE_CASE__ ).parent / """test_run""" / """model_checkpoints""" ) , type=SCREAMING_SNAKE_CASE__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(SCREAMING_SNAKE_CASE__ ).parent / """test_run""" / """dummy-train-data""" ) , type=SCREAMING_SNAKE_CASE__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str , ) -> Dict:
pl.seed_everything(args.seed )
# init model
_snake_case : Optional[Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
# add custom checkpoints
if checkpoint_callback is None:
_snake_case : str = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(SCREAMING_SNAKE_CASE__ )
if logging_callback is None:
_snake_case : Optional[Any] = LoggingCallback()
_snake_case : Union[str, Any] = {}
    if args.fp16:
_snake_case : str = 16
if args.gpus > 1:
_snake_case : str = '''auto'''
_snake_case : Optional[int] = '''ddp'''
_snake_case : Optional[int] = args.accumulate_grad_batches
_snake_case : Optional[Any] = None
_snake_case : int = '''auto'''
_snake_case : Optional[int] = pl.Trainer.from_argparse_args(
SCREAMING_SNAKE_CASE__ , weights_summary=SCREAMING_SNAKE_CASE__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=SCREAMING_SNAKE_CASE__ , val_check_interval=1 , num_sanity_val_steps=2 , **SCREAMING_SNAKE_CASE__ , )
if args.do_train:
trainer.fit(SCREAMING_SNAKE_CASE__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
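# A standalone sketch of the weight-decay grouping used in configure_optimizers()
# above: parameters whose names contain "bias" or "LayerNorm.weight" get zero
# decay. The toy model is a stand-in; which substrings actually match depends
# on the real model's parameter names.
import torch
from torch.optim import AdamW

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.LayerNorm(4))
no_decay = ("bias", "LayerNorm.weight")
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(grouped_parameters, lr=5e-5, eps=1e-8)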
| 317 |
from math import isclose, sqrt
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : List[str] = point_y / 4 / point_x
A_ : Union[str, Any] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
A_ : int = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
A_ : List[str] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
A_ : List[str] = outgoing_gradient**2 + 4
A_ : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
A_ : Any = (point_y - outgoing_gradient * point_x) ** 2 - 100
A_ : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
A_ : Any = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
A_ : int = x_minus if isclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else x_plus
A_ : Any = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE = 1.4 , SCREAMING_SNAKE_CASE = -9.6 ):
A_ : int = 0
A_ : float = first_x_coord
A_ : float = first_y_coord
    A_ : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
A_ , A_ , A_ : List[str] = next_point(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
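# A self-contained sanity check of the reflection algebra above (a sketch):
# with s = sin(2*theta) and c = cos(2*theta) for theta = atan(normal_gradient),
# the outgoing gradient (s - c*m) / (c + s*m) equals tan(2*theta - atan(m)),
# i.e. the incoming direction reflected about the normal. The gradients below
# are arbitrary test values.
from math import atan, isclose, tan

g, m = 0.5, -2.0  # normal gradient / incoming gradient
s = 2 * g / (1 + g * g)
c = (1 - g * g) / (1 + g * g)
outgoing = (s - c * m) / (c + s * m)
assert isclose(outgoing, tan(2 * atan(g) - atan(m)))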
| 186 | 0 |
def UpperCamelCase ( snake_case__ : int ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
UpperCamelCase : Any = len(bin(_snake_case )[3:] )
UpperCamelCase : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
UpperCamelCase : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
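# A worked example consistent with the routine above (a sketch): for -5, three
# magnitude bits plus one sign bit give width 4, and the two's complement is
# 2**4 - 5 = 11 = 0b1011.
number = -5
width = len(bin(-number)[2:]) + 1
print(bin((1 << width) + number))  # 0b1011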
| 352 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def UpperCamelCase ( snake_case__ : Accelerator , snake_case__ : int = 16 ) -> Dict:
UpperCamelCase : str = AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCamelCase : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase : List[str] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase : str = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase : Dict = 8
else:
UpperCamelCase : Union[str, Any] = None
return tokenizer.pad(
snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
UpperCamelCase : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def UpperCamelCase ( snake_case__ : str , snake_case__ : List[Any] ) -> List[str]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , snake_case__ ) == "1":
UpperCamelCase : List[str] = 2
# New Code #
UpperCamelCase : Optional[int] = int(args.gradient_accumulation_steps )
UpperCamelCase : List[str] = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase : Union[str, Any] = config['lr']
UpperCamelCase : Optional[int] = int(config['num_epochs'] )
UpperCamelCase : List[Any] = int(config['seed'] )
UpperCamelCase : List[str] = int(config['batch_size'] )
UpperCamelCase : Optional[Any] = evaluate.load('glue' , 'mrpc' )
set_seed(snake_case__ )
UpperCamelCase , UpperCamelCase : int = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase : List[str] = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
UpperCamelCase : Any = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
with LocalSGD(
accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
UpperCamelCase : int = model(**snake_case__ )
UpperCamelCase : Union[str, Any] = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase : Dict = model(**snake_case__ )
UpperCamelCase : Any = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
UpperCamelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , snake_case__ )
def UpperCamelCase ( ) -> Dict:
UpperCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=snake_case__ , default=snake_case__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=snake_case__ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=snake_case__ , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
UpperCamelCase : str = parser.parse_args()
UpperCamelCase : Tuple = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
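# A minimal sketch of the LocalSGD pattern from the training loop above, with a
# toy model and synthetic data: parameters are synchronized only every
# `local_sgd_steps` optimizer steps when running distributed (on a single
# process the wrapper is effectively a no-op).
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(16)]
model, optimizer = accelerator.prepare(model, optimizer)
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for x, y in batches:
        x, y = x.to(accelerator.device), y.to(accelerator.device)
        loss = F.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # counts steps; syncs parameters every 8th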
| 103 | 0 |
from typing import Any
class __lowerCAmelCase :
def __init__(self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = data
snake_case_ : Tuple = None
class __lowerCAmelCase :
def __init__(self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = None
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.head
while temp is not None:
print(temp.data , end=''' ''' )
snake_case_ : Optional[Any] = temp.next
print()
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = Node(__magic_name__ )
snake_case_ : Optional[int] = self.head
snake_case_ : Tuple = new_node
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
snake_case_ : Any = self.head
while node_a is not None and node_a.data != node_data_a:
snake_case_ : int = node_a.next
snake_case_ : Optional[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
snake_case_ : Optional[int] = node_a.next
if node_a is None or node_a is None:
return
snake_case_ , snake_case_ : str = node_a.data, node_a.data
if __name__ == "__main__":
lowerCAmelCase_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
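# The swap restated with conventional names (a sketch): only the `data`
# payloads are exchanged, so the node links never change.
class NodeSketch:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

def swap_nodes_sketch(head, data_a, data_b):
    if data_a == data_b:
        return
    node_a = head
    while node_a is not None and node_a.data != data_a:
        node_a = node_a.next
    node_b = head
    while node_b is not None and node_b.data != data_b:
        node_b = node_b.next
    if node_a is None or node_b is None:
        return
    node_a.data, node_b.data = node_b.data, node_a.data

head = NodeSketch(1, NodeSketch(2, NodeSketch(3, NodeSketch(4))))
swap_nodes_sketch(head, 1, 4)
node = head
while node is not None:
    print(node.data, end=" ")  # 4 2 3 1
    node = node.next
print()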
| 279 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
        '''--dataset_name''' , type=_UpperCamelCase , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=_UpperCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=_UpperCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=_UpperCamelCase , default=1_000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=_UpperCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=_UpperCamelCase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=_UpperCamelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
snake_case_ : List[Any] = parser.parse_args()
return args
def tokenize_function(tokenizer):
    """Build a map() callable that tokenizes the raw "text" column."""
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized sample into a tf.train.Example byte string."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        records.append(example.SerializeToString())
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 279 | 1 |
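# Toy illustration of the group_texts chunking used in the script above
# (assuming max_length=4): tokenized examples are concatenated, the remainder
# that does not fill a block is dropped, and the rest is re-split into
# fixed-length chunks.
examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8, 9]]}
concatenated = {k: sum(v, []) for k, v in examples.items()}  # [1, 2, ..., 9]
max_length = 4
total_length = (len(concatenated["input_ids"]) // max_length) * max_length  # 8
chunks = {
    k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
    for k, t in concatenated.items()
}
print(chunks)  # {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}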
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
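# How the lazy structure above behaves: importing the package is cheap because
# heavy submodules only load on first attribute access. A minimal standalone
# sketch of the same pattern via PEP 562 module-level __getattr__ (the module
# and attribute names here are hypothetical, not the transformers internals):
import importlib

_imports = {"heavy_submodule": ["BigClass"]}
_attr_to_module = {attr: mod for mod, attrs in _imports.items() for attr in attrs}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)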
"""simple docstring"""
import string
def __lowerCAmelCase ( lowercase : str ) -> str:
"""simple docstring"""
snake_case : List[str] = ""
for i in sequence:
snake_case : Optional[Any] = ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __lowerCAmelCase ( lowercase : str ) -> str:
"""simple docstring"""
snake_case : Dict = string.ascii_letters
snake_case : List[Any] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __lowerCAmelCase ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running performance benchmarks..." )
snake_case : Optional[int] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds' )
print(F'> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 112 | 0 |
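# Quick checks for the cipher above. Atbash is an involution, so applying it
# twice returns the original text.
assert atbash("ABCDEFGH") == "ZYXWVUTS"
assert atbash("123GGjj") == "123TTqq"
assert atbash(atbash("testStringtest")) == "testStringtest"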
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True)["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", action="store_true",
        help="Recalculate predictions even if the prediction file exists",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 86 |
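# How the max-over-ground-truths aggregation in the script above behaves, shown
# with a stand-in metric (the real exact_match_score/f1_score live in utils_rag):
def toy_exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())


preds = ["Paris", "london"]
golds = [["Paris", "City of Paris"], ["Berlin"]]
em = sum(max(toy_exact_match(p, gt) for gt in gts) for p, gts in zip(preds, golds))
print(100.0 * em / len(preds))  # 50.0 - a prediction gets credit if any gold answer matches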
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.')
| 86 | 1 |
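# The only difference between bisect_left and bisect_right above is the `<`
# versus `<=` comparison, which places the insertion point before or after any
# existing duplicates:
data = [1, 2, 4, 4, 4, 8]
print(bisect_left(data, 4))   # 2 -> first index whose value is >= 4
print(bisect_right(data, 4))  # 5 -> first index whose value is > 4
insort_left(data, 4)          # keeps data sorted: [1, 2, 4, 4, 4, 4, 8]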
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def test_inference_batch_consistent(self):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def test_dict_tuple_outputs_equivalent(self):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_pt_np_pil_outputs_equivalent(self):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def test_save_load_local(self):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
    def test_save_load_optional_components(self):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 357 |
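# The per-device generator pattern from get_dummy_inputs above, shown standalone:
# MPS only supports the global seed, while every other device can take a
# device-local torch.Generator for reproducible sampling.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (reseeded) default generator
    return torch.Generator(device=device).manual_seed(seed)


g = make_generator("cpu", seed=0)
print(torch.randn(2, generator=g))  # reproducible across runs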
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
lowerCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
lowerCAmelCase_ = 'segformer.encoder.' + key
if key.startswith('backbone' ):
lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' )
if "norm" in key:
lowerCAmelCase_ = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ = key[key.find('block' ) + len('block' )]
lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' )
if "attn.q" in key:
lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase_ = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase_ = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase_ = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' )
if key.startswith('head' ):
lowerCAmelCase_ = key.replace('head' , 'classifier' )
lowerCAmelCase_ = value
return new_state_dict
def read_in_k_v(config, state_dict):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[
config.hidden_sizes[i] :
]
def prepare_img():
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 | 0 |
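# What read_in_k_v in the script above does, on toy shapes: the original
# checkpoint stores the key and value projections stacked in one
# (2 * hidden, hidden) matrix, which is split row-wise at the hidden size.
import torch

hidden = 4
kv_weight = torch.arange(2 * hidden * hidden, dtype=torch.float32).reshape(2 * hidden, hidden)
k_weight = kv_weight[:hidden, :]  # first `hidden` rows -> key projection
v_weight = kv_weight[hidden:, :]  # remaining rows -> value projection
assert k_weight.shape == v_weight.shape == (hidden, hidden)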
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 21 |
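# Why offset=103 in the tokenizer above: Pegasus reserves a fixed block of ids
# for the sentence mask plus <unk_x> filler tokens, so the real vocabulary
# always starts at the same offset. The default branch builds:
offset = 103
additional = ["<mask_1>"]
additional += [f"<unk_{i}>" for i in range(2, offset)]
print(len(additional))  # 102: <mask_1> plus <unk_2> ... <unk_102>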
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count T(a) * T(b) is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
| 21 | 1 |
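# Sanity check for the T(a) * T(b) rectangle count the search above relies on:
# an a x b grid contains C(a+1, 2) * C(b+1, 2) axis-aligned rectangles.
def count_rectangles_brute_force(a: int, b: int) -> int:
    return sum(
        (a - w + 1) * (b - h + 1) for w in range(1, a + 1) for h in range(1, b + 1)
    )


def triangle(n: int) -> int:
    return n * (n + 1) // 2


assert count_rectangles_brute_force(2, 3) == triangle(2) * triangle(3) == 18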
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder").T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder").T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Load the T5X checkpoint weights into the given PyTorch model."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 367 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f'{builder.name}-train.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f'{builder.name}-train-00000-of-00002.arrow')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f'{builder.name}-train-00001-of-00002.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f'{builder.name}-train.arrow')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
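

# Illustrative sketch (not part of the original tests): the same dummy builder can be
# prepared outside the unittest harness. "DirectRunner" is Apache Beam's local runner;
# the temporary cache directory below is a placeholder.
def _example_prepare_dummy_beam_dataset():  # hypothetical helper for illustration
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()
        dset = builder.as_dataset()
        print(dset["train"][0])  # -> {"content": "foo"}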
| 298 | 0 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (top to bottom) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
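

# For contrast, an iterative version added for illustration (not in the original):
# it avoids Python's recursion-depth limit on long lists by using an explicit stack.
def print_reverse_iterative(head_node):  # hypothetical alternative with the same output as print_reverse
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())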
if __name__ == "__main__":
main()
| 21 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
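

# Illustrative usage sketch (not part of the original module): composing a RagConfig
# from two sub-configs. The checkpoint names stand in for any DPR question encoder
# and seq2seq generator configuration.
def _example_build_rag_config():  # hypothetical helper for illustration
    from ..auto.configuration_auto import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    return RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="compressed"
    )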
| 103 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length])

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 201 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(module, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Saves the data to disk; only writes from the main process (or via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Gets a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merges `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Checks if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
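

# Illustrative usage sketch (not part of the original module): `patch_environment`
# scopes environment variables to a block, which is handy when configuring launchers.
def _example_patch_environment():  # hypothetical helper for illustration
    with patch_environment(master_addr="127.0.0.1", master_port=29501):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"  # keys are upper-cased inside the block
    assert "MASTER_ADDR" not in os.environ  # removed again on exit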
| 201 | 1 |
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
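

# Illustrative usage sketch (not part of the original module): prompting for a
# boolean field with `_ask_field` and its yes/no converter. The prompt text is made up.
def _example_ask_use_cpu():  # hypothetical helper for illustration
    return _ask_field(
        "Do you want to run on CPU only? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )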
| 293 |
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 112 | 0 |
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
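

# Usage sketch added for illustration (the menu data below is made up): greedily
# fill a budget of 60 weight units, ranking items by value-to-weight ratio.
def _example_greedy_knapsack():  # hypothetical helper for illustration
    food = ["Burger", "Pizza", "Coca Cola", "Rice"]
    value = [80, 100, 60, 70]
    weight = [40, 60, 40, 70]
    menu = build_menu(food, value, weight)
    chosen, total_value = greedy(menu, 60, Things.value_weight)
    print(chosen, total_value)  # -> [Things(Burger, 80, 40)] 80.0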
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 202 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        output_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token = tokenizer.encode(prefix_text + input_text)
        x_token_1 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_2 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_text = tokenizer.decode(x_token)
        decoded_text_1 = tokenizer.decode(x_token_1)
        decoded_text_2 = tokenizer.decode(x_token_2)
        self.assertEqual(decoded_text, output_text)
        self.assertEqual(decoded_text_1, output_text)
        self.assertEqual(decoded_text_2, output_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_1 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_2 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_ids = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_1 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids, expected_mask)
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 202 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system Ax = b via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [A | b]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Returns a polynomial that interpolates the given y-values at x = 1, 2, 3, ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) defined in the problem statement."""
    return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sums the first incorrect terms (FITs) of the optimum polynomials fitted
    to the first 1..order terms of the sequence generated by `func`."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
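

# Small illustration added for clarity (not in the original): fitting only the
# first terms of the cubic n**3 yields the classic first incorrect terms (FITs).
def _example_first_incorrect_terms():  # hypothetical helper for illustration
    def cube(n: int) -> int:
        return n ** 3

    op_1 = interpolate([cube(1)])           # constant fit through (1, 1)
    op_2 = interpolate([cube(1), cube(2)])  # linear fit through (1, 1) and (2, 8)
    print(op_1(2), op_2(3))  # -> 1 15, the FITs quoted in the problem statement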
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 14 | 0 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def _lowercase (self : Union[str, Any] , __a : Any , __a : Dict="" , __a : Union[str, Any]=None , **__a : Tuple ):
UpperCAmelCase_ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
UpperCAmelCase_ = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ = generate_kwargs["max_new_tokens"]
else:
UpperCAmelCase_ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCAmelCase_ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _lowercase (self : Optional[int] , __a : Union[str, Any] , __a : Optional[int]=ReturnType.FULL_TEXT , __a : List[str]=True ):
UpperCAmelCase_ = model_outputs["generated_sequence"][0]
UpperCAmelCase_ = model_outputs["input_ids"]
UpperCAmelCase_ = model_outputs["prompt_text"]
UpperCAmelCase_ = generated_sequence.numpy().tolist()
UpperCAmelCase_ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ = text[prompt_length:]
UpperCAmelCase_ = {"generated_text": all_text}
records.append(_lowercase )
return records
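# Minimal numpy illustration of the reshape in _forward above: generate() returns
# (batch * num_return_sequences, seq_len) and the pipeline regroups it per prompt
# (numpy stands in for the pt/tf tensors).
import numpy as np

in_b, num_return_sequences, seq_len = 2, 3, 5
demo = np.arange(in_b * num_return_sequences * seq_len).reshape(-1, seq_len)
out_b = demo.shape[0]
demo = demo.reshape(in_b, out_b // in_b, *demo.shape[1:])
assert demo.shape == (2, 3, 5)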
| 370 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
class __A ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
def __init__(self : Any , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : Dict[str, int] = None , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Dict , ):
super().__init__(**__a )
UpperCAmelCase_ = size if size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(__a )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowercase (self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ):
UpperCAmelCase_ = get_size_dict(__a )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def _lowercase (self : List[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : int , ):
UpperCAmelCase_ = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def _lowercase (self : str , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Any , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def _lowercase (self : Dict , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : Dict , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(__a , param_name="crop_size" , default_to_square=__a )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__a )
if not is_batched(__a ):
UpperCAmelCase_ = [images]
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
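# Illustration of the rescale -> normalize arithmetic the processor applies per
# channel (a sketch; the statistics are the ImageNet defaults from the imports).
import numpy as np

demo_image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
demo_rescaled = demo_image * (1 / 255)                  # do_rescale
demo_normalized = (demo_rescaled - np.array(IMAGENET_DEFAULT_MEAN)) / np.array(IMAGENET_DEFAULT_STD)  # do_normalize
demo_pixel_values = demo_normalized.transpose(2, 0, 1)  # ChannelDimension.FIRST
assert demo_pixel_values.shape == (3, 224, 224)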
| 106 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """simple docstring"""
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create( cls , common , init_noise_sigma , timesteps ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    """simple docstring"""
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state( self ):
        '''simple docstring'''
        return True
    @register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.00_01 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.float32 , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state( self , common = None ) -> DDPMSchedulerState:
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self , state , sample , timestep = None ) -> jnp.ndarray:
        '''simple docstring'''
        return sample
    def set_timesteps( self , state , num_inference_steps , shape = () ) -> DDPMSchedulerState:
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self , state , t , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , state , model_output , timestep , sample , key = None , return_dict = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state , original_samples , noise , timesteps , ) -> jnp.ndarray:
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self , state , sample , noise , timesteps , ) -> jnp.ndarray:
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
    def __len__( self ):
        '''simple docstring'''
        return self.config.num_train_timesteps
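# Scalar sanity check (plain Python, toy numbers) of the "epsilon" branch and the
# posterior-mean coefficients from formula (7) of https://arxiv.org/pdf/2006.11239.pdf.
alpha_prod_t, alpha_prod_t_prev = 0.5, 0.8
alpha_t = alpha_prod_t / alpha_prod_t_prev
beta_t = 1 - alpha_t
beta_prod_t, beta_prod_t_prev = 1 - alpha_prod_t, 1 - alpha_prod_t_prev
x0, eps = 1.3, -0.4
noised = alpha_prod_t**0.5 * x0 + beta_prod_t**0.5 * eps  # forward noising
pred_x0 = (noised - beta_prod_t**0.5 * eps) / alpha_prod_t**0.5
assert abs(pred_x0 - x0) < 1e-9
coeff_x0 = (alpha_prod_t_prev**0.5 * beta_t) / beta_prod_t
coeff_xt = alpha_t**0.5 * beta_prod_t_prev / beta_prod_t
posterior_mean = coeff_x0 * pred_x0 + coeff_xt * noised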
| 90 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
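# Hypothetical mini-demo of _find_text_in_file: it returns the block between the two
# marker lines plus the indices needed to splice a replacement back in.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as _tmp:
    _tmp.write("intro\n<!--start-->\nold list\n<!--end-->\noutro\n")
_block, _start, _end, _lines = _find_text_in_file(_tmp.name, "<!--start-->", "<!--end-->")
assert _block == "old list\n" and (_start, _end) == (2, 3)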
| 298 | 0 |
def naive_pattern_search(s, pattern):
    """simple docstring"""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
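# Cross-check with the built-in substring search: the naive scan above is
# O(len(s) * len(pattern)), while str.find uses an optimized two-way algorithm.
def find_all(s, pattern):
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions

assert find_all('ABAAABCDBBABCDDEBCABC', 'ABC') == naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC') == [4, 10, 18]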
| 93 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    """simple docstring"""
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """simple docstring"""
    converted_dict = {}
    CONVERSION_MAPPING = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    DECODER_CONVERSION_MAPPING = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '''.'''.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''', R'''layer.\1''', new_key)
                new_key = new_key.replace('''encoder''', '''encoder.encoder''')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''', R'''layer.\1''', new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """simple docstring"""
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36, d_ff=39_68, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=15_36, d_ff=39_68, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = PixaStructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        # assumed attribute targets for these two collapsed assignments
        processor.image_processor.max_patches = 40_96
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('''Model saved in {}'''.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Whether the converted model is a VQA variant.')
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
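# A hedged usage sketch (paths are placeholders, not real checkpoints); running the
# script is equivalent to calling the converter directly:
#
#   convert_pixastruct_original_pytorch_checkpoint_to_hf(
#       "checkpoints/pix2struct_base", "dump/pix2struct-base", use_large=False
#   )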
| 93 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch(self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
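# Hedged usage sketch of the class above (requires network access; the checkpoint id
# is the standard mBART en-ro model listed in PRETRAINED_VOCAB_FILES_MAP):
#
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # each input_ids row ends with [eos_token_id, en_XX code] -- the suffix_tokens set above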
| 201 |
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
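from math import comb

# Cross-check with math.comb (Python >= 3.8) for the three examples printed above.
assert combinations(52, 5) == comb(52, 5) == 2_598_960
assert combinations(40, 4) == comb(40, 4) == 91_390
assert combinations(10, 3) == comb(10, 3) == 120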
| 201 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
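# Illustration of the "*" wildcard routing through MAPPING above, using the same
# string surgery as recursively_load_weights (the example weight name is assumed):
demo_name = "encoder.layers.7.self_attn.k_proj.weight"
demo_key = "self_attn.k_proj"
demo_layer = demo_name.split(demo_key)[0].split(".")[-2]
assert demo_layer == "7"
assert MAPPING[demo_key].replace("*", demo_layer) == "encoder.layers.7.attention.k_proj"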
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
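# Toy analogue of what _LazyModule buys you (illustrative, not the transformers
# implementation): importing the package stays cheap and the heavy module is only
# imported on first attribute access.
import importlib

class _ToyLazy:
    def __init__(self, module_name):
        self._module_name, self._module = module_name, None
    def __getattr__(self, item):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, item)

_lazy_json = _ToyLazy("json")                    # nothing imported yet
assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'  # the real import happens here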
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("""T""")
U = TypeVar("""U""")
class DoubleLinkedListNode( Generic[T, U] ):
    def __init__( self , key , val ):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList( Generic[T, U] ):
    def __init__( self ):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__( self ):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add( self , node ):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node ):
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache( Generic[T, U] ):
    decorator_function_to_instance_map: dict = {}
    def __init__( self , capacity ):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , _a ):
return key in self.cache
    def get( self , key ):
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self , key , value ):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls , size = 128 ):
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , "cache_info" , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
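# Example use of the decorator restored above (it keys the cache on args[0], so it
# only suits unary functions):
@LRUCache.decorator(100)
def fib(num):
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(100))          # 354224848179261915075, computed with memoization
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)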
| 202 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_A : Optional[int] = """
Human: <<task>>
Assistant: """
_A : List[Any] = """huggingface-tools/default-prompts"""
_A : Optional[int] = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run") -> str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
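# Quick behavioral check: anything containing whitespace is treated as a literal
# prompt and returned unchanged; a bare repo id would instead trigger a hub download.
assert download_prompt("Translate this to French", agent_name="demo") == "Translate this to French"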
| 202 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """simple docstring"""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """simple docstring"""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __snake_case ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
def __init__( self : Dict , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : float = 1 / 255 , _snake_case : bool = True , _snake_case : Union[float, Iterable[float]] = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : bool = True , _snake_case : Optional[str] = None , _snake_case : Optional[str] = "" , **_snake_case : Optional[int] , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ = get_size_dict(_snake_case)
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_value
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCAmelCase_ = apply_ocr
UpperCAmelCase_ = ocr_lang
UpperCAmelCase_ = tesseract_config
def lowerCamelCase ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(_snake_case)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
UpperCAmelCase_ = (size['''height'''], size['''width'''])
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Dict , ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : np.ndarray , _snake_case : Union[float, Iterable[float]] , _snake_case : Union[float, Iterable[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Optional[int]=None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : Union[float, Iterable[float]] = None , _snake_case : bool = None , _snake_case : Optional[str] = None , _snake_case : Optional[str] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : List[str] , ):
"""simple docstring"""
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_snake_case)
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase_ = make_list_of_images(_snake_case)
if not valid_images(_snake_case):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_snake_case) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''')
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for image in images:
UpperCAmelCase_ , UpperCAmelCase_ = apply_tesseract(_snake_case , _snake_case , _snake_case)
words_batch.append(_snake_case)
boxes_batch.append(_snake_case)
if do_resize:
UpperCAmelCase_ = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_snake_case , scale=_snake_case) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_snake_case , _snake_case) for image in images]
UpperCAmelCase_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=_snake_case)
if apply_ocr:
UpperCAmelCase_ = words_batch
UpperCAmelCase_ = boxes_batch
return data
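# Worked example of the 0-1000 box normalization defined above: a 50x20-pixel word
# box at (10, 30) inside a 200x100 image.
assert normalize_box([10, 30, 60, 50], 200, 100) == [50, 300, 300, 500]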
| 356 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
    def test_pretrained_model_lists(self):
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,  # variable name kept from the literal defined above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
@classmethod
    def setUpClass(cls):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs), {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
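

# Hedged usage sketch (editor addition): `MaMaaaTokenizer` appears to be an
# obfuscated alias of transformers' `M2M100Tokenizer`. End-to-end translation
# with the matching public model would look like this:
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    mdl = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    enc = tok("Life is like a box of chocolates.", return_tensors="pt")
    out = mdl.generate(**enc, forced_bos_token_id=tok.get_lang_id("fr"))
    print(tok.batch_decode(out, skip_special_tokens=True))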
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
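# Hedged usage sketch (editor addition): example invocation of this script.
# The file name is an assumption; adjust the paths to your own checkpoint.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json \
#       --tf_dump_path ./bert/ \
#       --compare_with_pt_model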
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
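

# Hedged usage sketch (editor addition): the forward pass above expects a
# tokenized query batch plus a support batch carrying "sizes",
# "start_token_id" and "end_token_id" tensors. The [E]/[/E] entity markers
# follow the FSNER convention; treat the exact plumbing here as an assumption.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")
    query = tokenizer(["does my plan cover roaming in Canada?"], return_tensors="pt", padding=True)
    supports = tokenizer(["[E] roaming [/E] is enabled for your plan"], return_tensors="pt", padding=True)
    supports["sizes"] = torch.tensor([1])  # one support example for the single query
    supports["start_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[E]"))
    supports["end_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[/E]"))

    model = FSNERModel()
    p_starts, p_ends = model(query, supports)  # per-token span start/end scores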
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
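# Hedged usage sketch (editor addition): launching the trainer. The file name
# is an assumption, and W&B reporting is enabled above, so set
# WANDB_DISABLED=true to skip it:
#
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results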
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True

    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
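

# Hedged usage sketch (editor addition): `DebertaVa*` appear to be obfuscated
# `DebertaV2*` names. A plain feature-extraction pass with the public
# checkpoint exercised by the integration test above:
if __name__ == "__main__":
    from transformers import AutoTokenizer, DebertaV2Model

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
    inputs = tokenizer("DeBERTa v2 uses disentangled attention.", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state
    print(hidden.shape)  # (1, sequence_length, 1536) for the xlarge checkpoint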
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
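# Hedged usage sketch (editor addition): the input file lists one weighted
# edge per line ("node_a node_b distance"), and the first character of the
# file is taken as the start node. Example contents and invocation (both
# illustrative):
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f edges.txt -i 100 -s 5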
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
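

# Hedged migration sketch (editor addition): new code should use the
# replacement class named in the deprecation warning above.
if __name__ == "__main__":
    from transformers import OwlViTImageProcessor

    image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")
    print(type(image_processor).__name__)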
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
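

# Hedged usage sketch (editor addition): instantiating the config, e.g. to
# build a randomly initialized ViT-MSN model (the model class lives in the
# companion modeling file):
if __name__ == "__main__":
    config = ViTMSNConfig()
    print(config.hidden_size, config.num_hidden_layers)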
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
_UpperCAmelCase : List[str] = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase : Union[str, Any] = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
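# Example invocation (illustrative only; the script name, checkpoint, and
# paths below are placeholders rather than part of the original script):
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --output_dir ./tapex-tabfact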
| 170 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    :param ksize: the kernel size (an even value is incremented to the next odd one)
    :param sigma: standard deviation of the gaussian envelope
    :param theta: orientation (in degrees) of the normal to the parallel stripes
    :param lambd: wavelength of the sinusoidal factor
    :param gamma: spatial aspect ratio
    :param psi:   phase offset
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
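# Quick sanity check without any image I/O (illustrative): the kernel is always
# square with an odd side length, since an even `ksize` is bumped up by one.
#   gabor_filter_kernel(3, 8, 0, 10, 0, 0).shape   # -> (3, 3)
#   gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape  # -> (11, 11)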
| 54 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 7 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
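# Minimal usage sketch (illustrative; assumes the public checkpoint below is
# reachable, which is an assumption rather than part of this module):
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   print(tokenizer("Hello world").input_ids)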
| 368 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
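# Minimal usage sketch (illustrative; constructs a default config locally,
# no download needed):
#   config = RwkvConfig(context_length=2048)
#   print(config.attention_hidden_size)  # defaults to hidden_size (4096)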
| 78 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system `matrix * solution = vector` by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial passing through the points (1, y_1), (2, y_2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials for each order."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 |
'''simple docstring'''
def binary_count_setbits(a: int) -> int:
    """Return the number of set bits (1s) in the binary representation of a."""
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
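# Worked examples (illustrative):
#   binary_count_setbits(25)  # -> 3, since 25 = 0b11001
#   binary_count_setbits(36)  # -> 2, since 36 = 0b100100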
| 181 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # Each shard filename ends in "-<shard>-<num_samples>.tfrecord", so the
    # total sample count can be read off the filenames without opening them.
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
main(args)
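# Example invocation on a TPU VM (illustrative; the bucket paths are placeholders):
#   python run_mlm.py \
#     --tokenizer unigram-tokenizer-wikitext \
#     --pretrained_model_config roberta-base \
#     --train_dataset gs://my-bucket/train/ \
#     --eval_dataset gs://my-bucket/eval/ \
#     --output_dir ./mlm-tpu \
#     --bfloat16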
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
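# Minimal usage sketch (illustrative; constructs a default config locally,
# no download needed):
#   config = BioGptConfig(num_hidden_layers=12)
#   print(config.model_type, config.vocab_size)  # biogpt 42384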
| 279 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
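# The input JSON is expected to map benchmark names to per-metric dicts with a
# required "new" value and optional "old"/"diff" values. A minimal illustrative
# example of the expected shape:
#   {"benchmarks/read.json": {"load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}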
| 120 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """
        A loop is present when the exact same node appears more than once
        while traversing the linked list.
        """
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 120 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the new component roots throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges two components, attaching the smaller one to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder doctest entry point; see the usage sketch below for an example graph."""
if __name__ == "__main__":
import doctest
doctest.testmod()
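# Minimal usage sketch (illustrative; a 4-node cycle whose MST weight is 4):
#   g = Graph(4)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (3, 0, 3)]:
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints each added edge, then the total MST weight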
| 365 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
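# Minimal usage sketch (illustrative; the model name is an assumption):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested list: [batch x tokens x hidden_size]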
| 135 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 99 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 170 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    """
    Segment tree supporting point updates and range queries for an
    associative function (e.g. sum, max, min).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update a single element in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Get the range query value over [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 370 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
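# The priority encoded by the environment tests above, as a standalone sketch
# (illustrative only; this is not the actual FeaturesManager implementation):
def pick_framework(torch_available: bool, tf_available: bool) -> str:
    if torch_available:  # PyTorch is preferred whenever it is installed
        return "pt"
    if tf_available:  # otherwise fall back to TensorFlow
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")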
| 286 | 0 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Build the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
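# Exact-arithmetic sanity check for the builder above (a minimal sketch;
# `harmonic_sum` is an illustrative helper, not part of the original):
from fractions import Fraction

def harmonic_sum(n_term: str) -> Fraction:
    # e.g. harmonic_sum("4") == Fraction(25, 12) == 1 + 1/2 + 1/3 + 1/4
    return sum(Fraction(1, k) for k in range(1, int(n_term) + 1))

assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]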
if __name__ == "__main__":
a : Any = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 105 |
"""simple docstring"""
import requests
snake_case_ = """""" # <-- Put your OpenWeatherMap appid here!
snake_case_ = """https://api.openweathermap.org/data/2.5/"""
def _lowerCAmelCase ( lowercase_ = "Chicago" , lowercase_ = APPID ):
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def _lowerCAmelCase ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ):
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()
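# e.g. current_weather("Kolkata, India")["main"]["temp"] returns the current
# temperature (in Kelvin by default for the OpenWeatherMap API).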
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 78 | 0 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}
    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output
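        # e.g. add_placeholder_tokens("<cat-toy>", num_vec_per_token=3) registers
        # "<cat-toy>_0", "<cat-toy>_1" and "<cat-toy>_2", and maps the base
        # placeholder to that list in self.token_map.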
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 357 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCamelCase_ ="""bart"""
UpperCamelCase_ =True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def a_ ( _lowercase , _lowercase="wiki40b" , _lowercase="dense" , _lowercase=10 ):
if source == "none":
_UpperCamelCase , _UpperCamelCase : List[str] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase : Dict = query_qa_dense_index(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
_UpperCamelCase , _UpperCamelCase : List[str] = query_es_index(
_lowercase , _lowercase , index_name='''english_wiki40b_snippets_100w''' , n_results=_lowercase , )
_UpperCamelCase : Any = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : List[Any] = '''question: {} context: {}'''.format(_lowercase , _lowercase )
return question_doc, support_list
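# e.g. make_support("why is the sky blue?", source="wiki40b", method="dense")
# returns the "question: ... context: ..." string fed to the generator plus a
# list of (article_title, section_title, score, passage_text) support tuples.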
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowercase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowercase : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
UpperCamelCase_ ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
UpperCamelCase_ ="""
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCamelCase_ ="""
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
UpperCamelCase_ =st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
UpperCamelCase_ =show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True
UpperCamelCase_ =st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
UpperCamelCase_ ="""
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
UpperCamelCase_ =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
UpperCamelCase_ ="""wiki40b"""
UpperCamelCase_ ="""dense"""
UpperCamelCase_ ="""beam"""
UpperCamelCase_ =2
UpperCamelCase_ =64
UpperCamelCase_ =256
UpperCamelCase_ =None
UpperCamelCase_ =None
UpperCamelCase_ =st.sidebar.checkbox("""Generation options""")
if generate_options:
UpperCamelCase_ ="""
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
UpperCamelCase_ =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCamelCase_ =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""dense""", n_results=10)
UpperCamelCase_ , UpperCamelCase_ =make_support(question, source=wiki_source, method="""sparse""", n_results=10)
UpperCamelCase_ =[]
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
UpperCamelCase_ ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
UpperCamelCase_ =res[1].strip()
if sec_titles == "":
UpperCamelCase_ ="""[{}]({})""".format(res[0], wiki_url)
else:
UpperCamelCase_ =sec_titles.split(""" & """)
UpperCamelCase_ =""" & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
UpperCamelCase_ ="""
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 128 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return a multiplication table for ``number`` with ``number_of_terms`` rows."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
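# Example: multiplication_table(number=3, number_of_terms=2) returns
# "3 * 1 = 3\n3 * 2 = 6".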
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 279 | 0 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : str = 2
while i * i <= n:
UpperCAmelCase__ : Optional[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
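# Worked example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6,
# matching its divisors 1, 2, 4, 7, 14 and 28.
assert count_divisors(28) == 6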
def a__ ( ) -> Any:
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : Any = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCAmelCase__ ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
| 299 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
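        # e.g. a 300x600 (height x width) input with shortest_edge=18 resizes to
        # (18, 36): the short side is pinned at 18 and the aspect ratio is kept.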
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 299 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer: PreTrainedTokenizer, label_column_id, max_seq_length=None):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )
def compute_metrics(lowercase ) -> Dict:
SCREAMING_SNAKE_CASE_: List[str] =np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
return results
if __name__ == "__main__":
main()
| 173 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
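# The aggregation described above, as a self-contained sketch (uniform n-gram
# weights, no smoothing; illustrative only, not the nmt_bleu internals):
import math

def bleu_from_stats(precisions, translation_length, reference_length):
    if min(precisions) == 0:  # any empty n-gram match zeroes the geometric mean
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    brevity_penalty = (
        1.0
        if translation_length > reference_length
        else math.exp(1 - reference_length / translation_length)
    )
    return brevity_penalty * geo_mean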
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 135 | 0 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 252 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
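# Minimal usage sketch for the wrapper above (names are illustrative, not a
# full training loop):
# scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
# ... after each optimizer.step() inside the gradient-accumulation loop:
# scheduler.step()  # no-op while gradients are still being accumulated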
| 252 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
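        # e.g. num_layers=12 with num_sparse_encoder_layers=3 places a sparse MoE
        # block every 4th layer; 0 sparse layers degenerates to an all-dense stack.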
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
| 158 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
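# A hedged usage sketch for the processor above. The checkpoint name is an
# assumption (a public VQA-finetuned ViLT), and the snippet needs network
# access plus Pillow/requests installed.
from PIL import Image
import requests
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(inputs.keys()))
# expected: text fields (input_ids, attention_mask, ...) from the tokenizer
# plus pixel_values / pixel_mask added by the image processor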
| 286 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
snake_case__ : str = to_pil_image(_lowerCAmelCase )
snake_case__ , snake_case__ : Optional[int] = pil_image.size
snake_case__ : int = pytesseract.image_to_data(_lowerCAmelCase , lang=_lowerCAmelCase , output_type="""dict""" , config=_lowerCAmelCase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
snake_case__ : Optional[Any] = [idx for idx, word in enumerate(_lowerCAmelCase ) if not word.strip()]
snake_case__ : Dict = [word for idx, word in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
snake_case__ : List[str] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
snake_case__ : Union[str, Any] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
snake_case__ : int = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
snake_case__ : List[Any] = [coord for idx, coord in enumerate(_lowerCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
snake_case__ : Tuple = []
for x, y, w, h in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Optional[int] = [x, y, x + w, y + h]
actual_boxes.append(_lowerCAmelCase )
# finally, normalize the bounding boxes
snake_case__ : List[Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = ["pixel_values"]
def __init__( self : Tuple , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : float = 1 / 255 , snake_case_ : bool = True , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : bool = True , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = "" , **snake_case_ : Dict , ):
super().__init__(**snake_case_ )
snake_case__ : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
snake_case__ : Union[str, Any] = get_size_dict(snake_case_ )
snake_case__ : Optional[Any] = do_resize
snake_case__ : Tuple = size
snake_case__ : Tuple = resample
snake_case__ : Any = do_rescale
snake_case__ : str = rescale_value
snake_case__ : Union[str, Any] = do_normalize
snake_case__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
snake_case__ : str = apply_ocr
snake_case__ : Tuple = ocr_lang
snake_case__ : Tuple = tesseract_config
def lowerCamelCase ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
snake_case__ : Optional[Any] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
snake_case__ : str = (size["""height"""], size["""width"""])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : Tuple , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Union[float, Iterable[float]] , snake_case_ : Union[float, Iterable[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase ( self : List[str] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : Union[str, Any]=None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : Union[float, Iterable[float]] = None , snake_case_ : bool = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Optional[Any] , ):
snake_case__ : str = do_resize if do_resize is not None else self.do_resize
snake_case__ : Tuple = size if size is not None else self.size
snake_case__ : Tuple = get_size_dict(snake_case_ )
snake_case__ : Optional[Any] = resample if resample is not None else self.resample
snake_case__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Any = image_mean if image_mean is not None else self.image_mean
snake_case__ : Optional[Any] = image_std if image_std is not None else self.image_std
snake_case__ : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case__ : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case__ : Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case__ : Dict = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
snake_case__ : List[Any] = [to_numpy_array(snake_case_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
snake_case__ : Optional[int] = []
snake_case__ : Dict = []
for image in images:
snake_case__ , snake_case__ : List[str] = apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
words_batch.append(snake_case_ )
boxes_batch.append(snake_case_ )
if do_resize:
snake_case__ : Tuple = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
snake_case__ : Optional[int] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
snake_case__ : List[str] = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
snake_case__ : Optional[int] = BatchFeature(data={"""pixel_values""": images} , tensor_type=snake_case_ )
if apply_ocr:
snake_case__ : Tuple = words_batch
snake_case__ : Union[str, Any] = boxes_batch
return data
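# Worked example of the 0-1000 normalization performed by normalize_box
# above; the function is re-declared here so the snippet runs standalone.
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# a 50x20 word box at (100, 40) inside a 1000x500 page:
print(normalize_box([100, 40, 150, 60], width=1000, height=500))
# -> [100, 80, 150, 120]: resolution-independent integer coordinates in [0, 1000]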
| 43 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__a = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
__a = "sshleifer/student_marian_en_ro_6_1"
__a = "sshleifer/tiny-mbart"
@require_torch
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def lowerCamelCase ( self : Any , snake_case_ : List[str]=False , snake_case_ : Tuple=None , snake_case_ : Dict=True , snake_case_ : Any=True , snake_case_ : Tuple=True , snake_case_ : List[str]=True , ):
snake_case__ : List[Any] = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=snake_case_ , num_train_epochs=1 , distributed=snake_case_ , extra_args_str=snake_case_ , predict_with_generate=snake_case_ , do_train=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , )
snake_case__ : int = TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case__ : Tuple = [log for log in logs if """eval_loss""" in log.keys()]
snake_case__ : List[Any] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case__ : Dict = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCamelCase ( self : List[Any] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ )
@require_torch_multi_gpu
def lowerCamelCase ( self : Tuple ):
self.run_seqaseq_quick(distributed=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : List[str] ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=snake_case_ )
@require_apex
@require_torch_gpu
def lowerCamelCase ( self : str ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCamelCase ( self : Optional[int] , snake_case_ : Union[str, Any] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case__ : Any = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case__ : Optional[int] = experiments[experiment_id]
snake_case__ : Optional[int] = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case__ : Union[str, Any] = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**snake_case_ , extra_args_str=data["""extra_args_str"""] )
snake_case__ : str = len(re.findall(snake_case_ , cl.err ) )
self.assertEqual(snake_case_ , data["""n_matches"""] )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=snake_case_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=snake_case_ , )
# Check metrics
snake_case__ : Dict = TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""" ) ).log_history
snake_case__ : List[str] = [log for log in logs if """eval_loss""" in log.keys()]
snake_case__ : List[str] = eval_metrics[0]
snake_case__ : Any = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_ )
# test if do_predict saves generations and metrics
snake_case__ : Optional[int] = os.listdir(snake_case_ )
snake_case__ : List[str] = {os.path.basename(snake_case_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCamelCase ( self : List[str] ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(snake_case_ : str ) -> Tuple[int, float]:
snake_case__ : Dict = """--skip_memory_metrics 0"""
snake_case__ : Optional[int] = self.run_trainer(
max_len=128 , model_name=snake_case_ , learning_rate=3E-4 , num_train_epochs=1 , optim=snake_case_ , distributed=snake_case_ , extra_args_str=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , n_gpus_to_use=1 , )
# Check metrics
snake_case__ : Optional[Any] = TrainerState.load_from_json(Path(snake_case_ , """trainer_state.json""" ) ).log_history
snake_case__ : Optional[int] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case__ : Tuple = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case__ : Optional[int] = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case__ , snake_case__ , snake_case__ : List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case__ , snake_case__ , snake_case__ : List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case__ : Dict = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case__ : Optional[Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case__ : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case__ : Tuple = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case__ : int = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
snake_case_ , snake_case_ , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def lowerCamelCase ( self : Dict , snake_case_ : int , snake_case_ : str , snake_case_ : int , snake_case_ : float = 3E-3 , snake_case_ : str = "adafactor" , snake_case_ : bool = False , snake_case_ : str = None , snake_case_ : int = 0 , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : int = None , ):
snake_case__ : Optional[Any] = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case__ : List[Any] = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(snake_case_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(snake_case_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
snake_case__ : List[Any] = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(snake_case_ )}\n ".split()
snake_case__ : Dict = """
--do_predict
""".split()
snake_case__ : List[Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case__ : Any = get_gpu_count()
snake_case__ : Optional[int] = get_torch_dist_unique_port()
snake_case__ : List[str] = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
snake_case__ : int = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case_ , env=self.get_env() )
else:
snake_case__ : str = ["""run_translation.py"""] + args
with patch.object(snake_case_ , """argv""" , snake_case_ ):
main()
return output_dir
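# The ~150MB expectation from the comments above, written out as arithmetic.
# Byte counts are standard Adam bookkeeping: two fp32 moment tensors
# (8 bytes/param) for AdamW vs two int8 moment tensors (2 bytes/param) for
# 8-bit bitsandbytes Adam.
quantized_params = 25_000_000  # non-embedding parameters, per the comment
saving_mb = quantized_params * (8 - 2) / 2**20
print(f"{saving_mb:.0f} MB")   # ~143 MB, hence the conservative 120 MB margin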
| 43 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 48 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Any =logging.get_logger(__name__)
def _lowerCAmelCase (_lowerCAmelCase):
print("Loading config file...")
def flatten_yaml_as_dict(_lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase="."):
UpperCamelCase_ = []
for k, v in d.items():
UpperCamelCase_ = parent_key + sep + k if parent_key else k
if isinstance(_lowerCAmelCase , collections.abc.MutableMapping):
items.extend(flatten_yaml_as_dict(_lowerCAmelCase , _lowerCAmelCase , sep=_lowerCAmelCase).items())
else:
items.append((new_key, v))
return dict(_lowerCAmelCase)
UpperCamelCase_ = argparse.Namespace()
with open(_lowerCAmelCase , "r") as yaml_file:
try:
UpperCamelCase_ = yaml.load(_lowerCAmelCase , Loader=yaml.FullLoader)
UpperCamelCase_ = flatten_yaml_as_dict(_lowerCAmelCase)
for k, v in flat_cfg.items():
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(_lowerCAmelCase , str(_lowerCAmelCase)))
return config
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = MobileViTVaConfig()
UpperCamelCase_ = False
# dataset
if task_name.startswith("imagenet1k_"):
UpperCamelCase_ = 10_00
if int(task_name.strip().split("_")[-1]) == 3_84:
UpperCamelCase_ = 3_84
else:
UpperCamelCase_ = 2_56
UpperCamelCase_ = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_"):
UpperCamelCase_ = 2_10_00
if int(task_name.strip().split("_")[-1]) == 3_84:
UpperCamelCase_ = 3_84
else:
UpperCamelCase_ = 2_56
UpperCamelCase_ = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_"):
UpperCamelCase_ = 1_51
UpperCamelCase_ = 5_12
UpperCamelCase_ = "ade20k-id2label.json"
UpperCamelCase_ = True
elif task_name.startswith("voc_"):
UpperCamelCase_ = 21
UpperCamelCase_ = 5_12
UpperCamelCase_ = "pascal-voc-id2label.json"
UpperCamelCase_ = True
# orig_config
UpperCamelCase_ = load_orig_config_file(_lowerCAmelCase)
assert getattr(_lowerCAmelCase , "model.classification.name" , -1) == "mobilevit_v2", "Invalid model"
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.classification.mitv2.width_multiplier" , 1.0)
assert (
getattr(_lowerCAmelCase , "model.classification.mitv2.attn_norm_layer" , -1) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.classification.activation.name" , "swish")
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.output_stride" , 16)
if "_deeplabv3" in task_name:
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36])
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_out_channels" , 5_12)
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_dropout" , 0.1)
# id2label
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset") , "r"))
UpperCamelCase_ = {int(_lowerCAmelCase): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = dct.pop(_lowerCAmelCase)
UpperCamelCase_ = val
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase=False):
if base_model:
UpperCamelCase_ = ""
else:
UpperCamelCase_ = "mobilevitv2."
UpperCamelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCamelCase_ = k[8:]
else:
UpperCamelCase_ = k
if ".block." in k:
UpperCamelCase_ = k_new.replace(".block." , ".")
if ".conv." in k:
UpperCamelCase_ = k_new.replace(".conv." , ".convolution.")
if ".norm." in k:
UpperCamelCase_ = k_new.replace(".norm." , ".normalization.")
if "conv_1." in k:
UpperCamelCase_ = k_new.replace("conv_1." , f"""{model_prefix}conv_stem.""")
for i in [1, 2]:
if f"""layer_{i}.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""")
if ".exp_1x1." in k:
UpperCamelCase_ = k_new.replace(".exp_1x1." , ".expand_1x1.")
if ".red_1x1." in k:
UpperCamelCase_ = k_new.replace(".red_1x1." , ".reduce_1x1.")
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""")
if f"""layer_{i}.1.local_rep.0.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""")
if f"""layer_{i}.1.local_rep.1.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""")
for i in [3, 4, 5]:
if i == 3:
UpperCamelCase_ = [0, 1]
elif i == 4:
UpperCamelCase_ = [0, 1, 2, 3]
elif i == 5:
UpperCamelCase_ = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
UpperCamelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""")
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
UpperCamelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""")
if f"""layer_{i}.1.conv_proj.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""")
if "pre_norm_attn.0." in k:
UpperCamelCase_ = k_new.replace("pre_norm_attn.0." , "layernorm_before.")
if "pre_norm_attn.1." in k:
UpperCamelCase_ = k_new.replace("pre_norm_attn.1." , "attention.")
if "pre_norm_ffn.0." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after.")
if "pre_norm_ffn.1." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1.")
if "pre_norm_ffn.3." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2.")
if "classifier.1." in k:
UpperCamelCase_ = k_new.replace("classifier.1." , "classifier.")
if "seg_head." in k:
UpperCamelCase_ = k_new.replace("seg_head." , "segmentation_head.")
if ".aspp_layer." in k:
UpperCamelCase_ = k_new.replace(".aspp_layer." , ".")
if ".aspp_pool." in k:
UpperCamelCase_ = k_new.replace(".aspp_pool." , ".")
rename_keys.append((k, k_new))
return rename_keys
def _lowerCAmelCase (_lowerCAmelCase):
UpperCamelCase_ = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head."):
keys_to_ignore.append(_lowerCAmelCase)
for k in keys_to_ignore:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase)
def _lowerCAmelCase ():
UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCamelCase_ = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase).raw)
return im
@torch.no_grad()
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_mobilevitva_config(_lowerCAmelCase , _lowerCAmelCase)
# load original state_dict
UpperCamelCase_ = torch.load(_lowerCAmelCase , map_location="cpu")
# load huggingface model
if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
UpperCamelCase_ = MobileViTVaForSemanticSegmentation(_lowerCAmelCase).eval()
UpperCamelCase_ = False
else:
UpperCamelCase_ = MobileViTVaForImageClassification(_lowerCAmelCase).eval()
UpperCamelCase_ = False
# remove and rename some keys of load the original model
UpperCamelCase_ = checkpoint
remove_unused_keys(_lowerCAmelCase)
UpperCamelCase_ = create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# load modified state_dict
model.load_state_dict(_lowerCAmelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
UpperCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt")
UpperCamelCase_ = model(**_lowerCAmelCase)
# verify classification model
if task_name.startswith("imagenet"):
UpperCamelCase_ = outputs.logits
UpperCamelCase_ = logits.argmax(-1).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx])
if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCamelCase_ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4)
Path(_lowerCAmelCase).mkdir(exist_ok=_lowerCAmelCase)
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(_lowerCAmelCase)
print(f"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(_lowerCAmelCase)
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase : Any =parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
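# A tiny standalone illustration of the string-based key renaming performed
# by create_rename_keys above; the sample key is hypothetical but follows
# the patterns the script handles, and the rule order matters.
k = "conv_1.block.conv.weight"                      # hypothetical checkpoint key
k = k.replace(".block.", ".")                       # drop the .block. wrapper
k = k.replace(".conv.", ".convolution.")            # conv -> convolution
k = k.replace("conv_1.", "mobilevitv2.conv_stem.")  # the stem gets the model prefix
print(k)  # mobilevitv2.conv_stem.convolution.weight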
| 128 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 351 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 0 |
__UpperCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 299 |
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
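# Worked check of solution() above: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
print(solution(15))    # 26
print(solution(1000))  # 1366, the Project Euler 16 answer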
| 299 | 1 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` for `iterations` steps and return the output."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
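# Usage of the fixed fizz_buzz above; note the trailing space after each entry.
print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz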
| 114 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
| 114 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
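# A hedged usage sketch of the config above: attribute_map lets callers read
# canonical names (hidden_size, num_hidden_layers, ...) that resolve to the
# GPT-2-style fields stored on the instance; the resolution itself lives in
# the PretrainedConfig base class. Running this assumes the surrounding
# transformers package context because of the relative imports at the top.
config = GPTBigCodeConfig(n_embd=512, n_layer=6, n_head=8)
print(config.n_embd, config.hidden_size)          # 512 512
print(config.n_layer, config.num_hidden_layers)   # 6 6
print(config.n_head, config.num_attention_heads)  # 8 8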
| 252 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force over all pairs; used for small sub-problems."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Only up to 6 strip neighbours can beat the current minimum."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 252 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
A__ = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__lowerCAmelCase )
def a_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
A__ = self._create_example_records()
A__ = Dataset.from_list(__lowerCAmelCase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__lowerCAmelCase ):
self.assertDictEqual(__lowerCAmelCase , example_records[i] )
def a_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self._create_example_records()
A__ = Dataset.from_list(__lowerCAmelCase )
A__ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a_ ( self : Any ) -> Optional[Any]: # checks what happens with missing columns
"""simple docstring"""
A__ = [{"""col_1""": 1}, {"""col_2""": """x"""}]
A__ = Dataset.from_list(__lowerCAmelCase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a_ ( self : int ) -> List[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
A__ = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
A__ = Dataset.from_list(__lowerCAmelCase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ = Dataset.from_list([] )
self.assertEqual(len(__lowerCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 276 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: read every `key`-th character per column."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by refilling the grid row by row."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
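# Worked example of the columnar transposition above (using the fixed
# function names): with key 2, "HELLO" is read out column by column.
print(encrypt_message(2, "HELLO"))  # HLOEL (column 0: H,L,O; column 1: E,L)
print(decrypt_message(2, "HLOEL"))  # HELLO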
| 276 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43 |
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
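# Non-interactive usage of quick_sort_random above; the right bound is
# exclusive, matching main().
data = [9, 1, 5, 7, 3, 8, 2]
quick_sort_random(data, 0, len(data))
print(data)  # [1, 2, 3, 5, 7, 8, 9]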
| 43 | 1 |
class EditDistance:
    """
    Use:
    solver = EditDistance()
    editDistanceResult = solver.min_dist_top_down(firstString, secondString)
    """

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 110 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Any = CTRLTokenizer
__lowercase : Any = False
__lowercase : Union[str, Any] = False
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
UpperCamelCase = {'unk_token': '<unk>'}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
def __UpperCamelCase ( self , **A_ ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'adapt react readapt apt'
UpperCamelCase = 'adapt react readapt apt'
return input_text, output_text
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = 'adapt react readapt apt'
UpperCamelCase = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
| 110 | 1 |
"""simple docstring"""
import requests
lowercase__ = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Fetch the top BBC News articles and print their titles."""
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 241 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every query vector, return its nearest dataset vector and distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine of the angle between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
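
    # Illustrative sketch (sample arrays invented for this example): find the
    # nearest neighbour of one query vector, then compare two parallel vectors.
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    print(similarity_search(dataset, value_array))  # -> [[[1.0, 1.0], 0.1414...]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # -> 1.0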
| 349 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__SCREAMING_SNAKE_CASE = None
try:
import msvcrt
except ImportError:
__SCREAMING_SNAKE_CASE = None
try:
import fcntl
except ImportError:
__SCREAMING_SNAKE_CASE = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__SCREAMING_SNAKE_CASE = OSError
# Data
# ------------------------------------------------
__SCREAMING_SNAKE_CASE = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__SCREAMING_SNAKE_CASE = '''3.0.12'''
__SCREAMING_SNAKE_CASE = None
def lowerCAmelCase_( ) -> int:
global _logger
_lowerCamelCase = _logger or logging.getLogger(__name__ )
return _logger
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = lock_file
return None
def __str__( self ):
_lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
self.lock.release()
return None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ):
_lowerCamelCase = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
_lowerCamelCase = self.hash_filename_if_too_long(lowerCamelCase__ , lowerCamelCase__ )
# The path to the lock file.
_lowerCamelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_lowerCamelCase = None
# The default timeout value.
_lowerCamelCase = timeout
# We use this lock primarily for the lock counter.
_lowerCamelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_lowerCamelCase = 0
return None
@property
def snake_case__ ( self ):
return self._lock_file
@property
def snake_case__ ( self ):
return self._timeout
@timeout.setter
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = float(lowerCamelCase__ )
return None
def snake_case__ ( self ):
raise NotImplementedError()
def snake_case__ ( self ):
raise NotImplementedError()
@property
def snake_case__ ( self ):
return self._lock_file_fd is not None
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=0.0_5 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_lowerCamelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_lowerCamelCase = id(self )
_lowerCamelCase = self._lock_file
_lowerCamelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(lowerCamelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_lowerCamelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def snake_case__ ( self , lowerCamelCase__=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_lowerCamelCase = id(self )
_lowerCamelCase = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
_lowerCamelCase = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
self.release()
return None
def __del__( self ):
self.release(force=lowerCamelCase__ )
return None
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = os.path.basename(lowerCamelCase__ )
if len(lowerCamelCase__ ) > max_length and max_length > 0:
_lowerCamelCase = os.path.dirname(lowerCamelCase__ )
_lowerCamelCase = str(hash(lowerCamelCase__ ) )
_lowerCamelCase = filename[: max_length - len(lowerCamelCase__ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(lowerCamelCase__ , lowerCamelCase__ )
else:
return path
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ):
from .file_utils import relative_to_absolute_path
super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ )
_lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def snake_case__ ( self ):
_lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_lowerCamelCase = os.open(self._lock_file , lowerCamelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCamelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCamelCase__ )
else:
_lowerCamelCase = fd
return None
def snake_case__ ( self ):
_lowerCamelCase = self._lock_file_fd
_lowerCamelCase = None
msvcrt.locking(lowerCamelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(lowerCamelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ):
_lowerCamelCase = os.statvfs(os.path.dirname(lowerCamelCase__ ) ).f_namemax
super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_lowerCamelCase = os.open(self._lock_file , lowerCamelCase__ )
try:
fcntl.flock(lowerCamelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCamelCase__ )
else:
_lowerCamelCase = fd
return None
def snake_case__ ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_lowerCamelCase = self._lock_file_fd
_lowerCamelCase = None
fcntl.flock(lowerCamelCase__ , fcntl.LOCK_UN )
os.close(lowerCamelCase__ )
return None
class lowerCamelCase_( A__ ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_lowerCamelCase = os.open(self._lock_file , lowerCamelCase__ )
except OSError:
pass
else:
_lowerCamelCase = fd
return None
def snake_case__ ( self ):
os.close(self._lock_file_fd )
_lowerCamelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__SCREAMING_SNAKE_CASE = None
if msvcrt:
__SCREAMING_SNAKE_CASE = WindowsFileLock
elif fcntl:
__SCREAMING_SNAKE_CASE = UnixFileLock
else:
__SCREAMING_SNAKE_CASE = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
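

# A minimal usage sketch (hypothetical lock path): the platform-appropriate
# FileLock alias above guards a critical section and releases on context exit.
if __name__ == "__main__":
    with FileLock("/tmp/demo.txt.lock", timeout=5):
        pass  # read or write the shared resource here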
| 368 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = ''''''
_lowerCamelCase = ''''''
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = 2_5_6
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = cva.imread(lowerCamelCase__ , 0 )
_lowerCamelCase = copy.deepcopy(self.img )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' )
_lowerCamelCase = np.sum(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
_lowerCamelCase = x[i] / self.k
self.sk += prk
_lowerCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCamelCase = int(last % last )
_lowerCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCamelCase__ )
_lowerCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCamelCase = self.img[j][i]
if num != self.last_list[num]:
_lowerCamelCase = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def snake_case__ ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def snake_case__ ( self ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__SCREAMING_SNAKE_CASE : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 73 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
a : str = "\nHuman: <<task>>\n\nAssistant: "
a : Tuple = "huggingface-tools/default-prompts"
a : Dict = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]="run" ):
if prompt_or_repo_id is None:
__UpperCAmelCase : Union[str, Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" , __lowerCamelCase ) is not None:
return prompt_or_repo_id
__UpperCAmelCase : List[Any] = cached_file(
__lowerCamelCase , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
return f.read()
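

# A minimal usage sketch (hypothetical agent name, commented out to avoid a
# network call at import time): None falls back to the default prompts repo,
# while a string containing whitespace is returned unchanged as an inline prompt.
# run_prompt = download_prompt(None, agent_name="my-agent", mode="run")
# inline_prompt = download_prompt("Answer briefly: <<task>>", agent_name="my-agent")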
| 114 |
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculates the built-in voltage of a pn junction diode."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
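
    # Illustrative sketch (assumed sample values, concentrations in cm^-3):
    # a silicon-like junction with n_i = 1e10 gives roughly 0.83 V here.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))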
| 114 | 1 |
"""simple docstring"""
from math import factorial
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
_a : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_a : Optional[int] = float(factorial(UpperCamelCase__ ) )
coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 355 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of train/eval `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 324 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 276 |
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the last index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Finds the index of the mismatched character in the text, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| 276 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load Flax checkpoints in a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load the weights of a Flax state dict into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
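

# A minimal usage sketch (hypothetical model and path, commented out because it
# needs real checkpoint files): load Flax weights into a PyTorch counterpart.
# pt_model = MyUNetModel(config)  # hypothetical PyTorch model instance
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")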
| 361 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
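

# A minimal usage sketch (illustrative override values, commented out because
# config modules are normally import-only):
# config = LxmertConfig(hidden_size=512, l_layers=6)
# config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 6}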
| 275 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour upsampling by a factor of 2, then a 3x3 convolution
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # strided 3x3 convolution halves the spatial resolution
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and add it as a per-channel bias
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
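

# A minimal usage sketch (illustrative shapes): initialise the resnet block and
# apply it to a dummy NHWC feature map with a dummy time embedding.
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    hidden_states = jnp.zeros((1, 8, 8, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    print(out.shape)  # (1, 8, 8, 32)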
| 110 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validates the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Performs Luhn-algorithm validation for a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validates the given credit card number and prints the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 110 | 1 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solves the linear system Ax = b iteratively with the Jacobi method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Checks whether the augmented matrix is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
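
    # Illustrative sketch (sample values): a strictly diagonally dominant
    # 2x2 system; the iterate approaches the exact solution (1/11, 7/11).
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25))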
| 98 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 98 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["labels"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 73 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : str , lowercase : Any , lowercase : Tuple=13 , lowercase : Optional[int]=2 , lowercase : int=24 , lowercase : List[str]=16 , lowercase : int=True , lowercase : List[Any]=True , lowercase : str=32 , lowercase : Tuple=5 , lowercase : str=4 , lowercase : Tuple=37 , lowercase : Tuple="gelu" , lowercase : Dict=0.1 , lowercase : Optional[int]=0.1 , lowercase : Dict=10 , lowercase : int=0.02 , lowercase : Optional[Any]=None , lowercase : Dict=2 , lowercase : List[Any]=2 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = patch_size
_snake_case = max_length
_snake_case = num_mel_bins
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
_snake_case = frequency_stride
_snake_case = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_snake_case = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_snake_case = (self.max_length - self.patch_size) // self.time_stride + 1
_snake_case = frequency_out_dimension * time_out_dimension
_snake_case = num_patches + 2
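        # Worked example with this tester's defaults (illustrative, not part of the
        # original test): num_mel_bins=16, max_length=24, patch_size=2 and strides of 2
        # give (16 - 2) // 2 + 1 = 8 frequency patches and (24 - 2) // 2 + 1 = 12 time
        # patches, i.e. 8 * 12 = 96 patches and a sequence length of 96 + 2 = 98.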
def A ( self : int ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, input_values, labels
def A ( self : str ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def A ( self : Union[str, Any] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = ASTModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_UpperCAmelCase : Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Dict = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_UpperCAmelCase : Any = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : int = False
def A ( self : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : List[Any] , lowercase : int ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = ASTModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def A ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['input_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
@slow
def A ( self : Any ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = ASTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.default_feature_extractor
_snake_case = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(lowercase )
_snake_case = self.default_feature_extractor
_snake_case , _snake_case = prepare_audio()
_snake_case = audio.squeeze().numpy()
_snake_case = feature_extractor(lowercase , sampling_rate=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
| 130 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''Encode each character, then mask it with a fresh random key value.'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''Invert encrypt using the stored key list.'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('''Hello''')
    print(c, k)
    print(Onepad().decrypt(c, k))
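# Why decrypt inverts encrypt (illustrative algebra, not part of the original):
# for character code p and key k, the cipher value is c = (p + k) * k = p*k + k**2,
# so (c - k**2) / k = p recovers the code exactly; int() merely guards against
# float round-off from the true division.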
| 130 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : List[str] ,_snake_case : List[str]=13 ,_snake_case : Any=30 ,_snake_case : str=2 ,_snake_case : Union[str, Any]=3 ,_snake_case : Dict=True ,_snake_case : Optional[int]=True ,_snake_case : Optional[Any]=32 ,_snake_case : Union[str, Any]=5 ,_snake_case : str=4 ,_snake_case : Dict=37 ,_snake_case : int="gelu" ,_snake_case : List[str]=0.1 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : Optional[int]=10 ,_snake_case : Union[str, Any]=0.02 ,_snake_case : List[str]=None ,_snake_case : Tuple=2 ,) -> Optional[Any]:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : List[str] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : str = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Optional[Any] = initializer_range
lowercase__ : List[Any] = scope
lowercase__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : Tuple = (image_size // patch_size) ** 2
lowercase__ : Optional[int] = num_patches + 1
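        # Worked example with these defaults (illustrative): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length is
        # 225 + 1 = 226 once the [CLS] token is prepended.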
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Any = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCAmelCase__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : Any ,_snake_case : List[Any] ,_snake_case : Dict ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = ViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__ : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : str ,_snake_case : List[str] ,_snake_case : Any ,_snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = ViTForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : str = 1
lowercase__ : Any = ViTForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : Any ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = self.type_sequence_label_size
lowercase__ : str = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__ : Optional[int] = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : Tuple = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCAmelCase : int = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = ViTModelTester(self )
lowercase__ : List[Any] = ConfigTester(self ,config_class=lowerCAmelCase__ ,has_text_modality=lowerCAmelCase__ ,hidden_size=37 )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ ,nn.Linear ) )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowerCAmelCase__ )
lowercase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[Any] = [*signature.parameters.keys()]
lowercase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowerCAmelCase__ )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = ViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : str = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(lowerCAmelCase__ )
lowercase__ : Any = self.default_image_processor
lowercase__ : List[str] = prepare_img()
lowercase__ : Any = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase__ : Any = model(**lowerCAmelCase__ )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,lowerCAmelCase__ )
lowercase__ : Optional[Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase__ : Dict = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(lowerCAmelCase__ )
lowercase__ : List[Any] = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' ,size=480 )
lowercase__ : Tuple = prepare_img()
lowercase__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowercase__ : List[Any] = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase__ : Any = model(lowerCAmelCase__ ,interpolate_pos_encoding=lowerCAmelCase__ )
# verify the logits
lowercase__ : Any = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,lowerCAmelCase__ )
lowercase__ : List[Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowerCAmelCase__ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : Any = ViTModel.from_pretrained('''facebook/dino-vits8''' ,torch_dtype=torch.floataa ,device_map='''auto''' )
lowercase__ : int = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : int = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowercase__ : int = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ : Optional[int] = model(lowerCAmelCase__ )
| 16 |
'''
Introsort: a hybrid of quicksort, heapsort and insertion sort. Quicksort with a
median-of-three pivot runs until a recursion-depth budget is exhausted, heapsort
takes over beyond that, and small slices are finished with insertion sort.
'''
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
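# Quick sanity check for the hybrid sort above (illustrative):
#     >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#     [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
# Slices of at most size_threshold=16 elements go straight to insertion_sort; larger
# ones are quicksorted around a median-of-three pivot until the 2 * ceil(log2(n))
# depth budget is spent, after which heap_sort caps the worst case at O(n log n).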
| 324 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_instructblip': [
        'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InstructBlipConfig',
        'InstructBlipQFormerConfig',
        'InstructBlipVisionConfig',
    ],
    'processing_instructblip': ['InstructBlipProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_instructblip'] = [
        'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InstructBlipQFormerModel',
        'InstructBlipPreTrainedModel',
        'InstructBlipForConditionalGeneration',
        'InstructBlipVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
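# Behaviour of the lazy module above (illustrative): at import time only the
# _import_structure mapping is registered with _LazyModule, so
#     from transformers.models.instructblip import InstructBlipProcessor
# triggers the real import of processing_instructblip on first attribute access,
# keeping `import transformers` cheap even when torch is installed.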
| 258 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, '''r''') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Union[str, Any]="<cls>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Any="<eos>" , **UpperCamelCase__ : int , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_vocab_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE : List[Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE : Union[str, Any] = unk_token
SCREAMING_SNAKE_CASE : Any = cls_token
SCREAMING_SNAKE_CASE : List[str] = pad_token
SCREAMING_SNAKE_CASE : List[str] = mask_token
SCREAMING_SNAKE_CASE : Any = eos_token
SCREAMING_SNAKE_CASE : List[str] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int ):
'''simple docstring'''
return self._id_to_token.get(UpperCamelCase__ , self.unk_token )
def __A ( self : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
return self._token_to_id.get(UpperCamelCase__ , self._token_to_id.get(self.unk_token ) )
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return text.split()
def __A ( self : List[str] , UpperCamelCase__ : Dict=False ):
'''simple docstring'''
return len(self._id_to_token )
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def __A ( self : Union[str, Any] , UpperCamelCase__ : str ):
'''simple docstring'''
return self._token_to_id.get(UpperCamelCase__ , self._token_to_id.get(self.unk_token ) )
def __A ( self : List[str] , UpperCamelCase__ : int ):
'''simple docstring'''
return self._id_to_token.get(UpperCamelCase__ , self.unk_token )
def __A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
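    # Worked example for build_inputs_with_special_tokens above (illustrative): a
    # single sequence [5, 6, 7] becomes [cls_id, 5, 6, 7, eos_id], and a pair becomes
    # [cls_id] + ids_a + [eos_id] + ids_b + [eos_id], because ESM reuses <eos> where
    # other tokenizers would insert a dedicated <sep> token.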
def __A ( self : Union[str, Any] , UpperCamelCase__ : List , UpperCamelCase__ : Optional[List] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCamelCase__ ) + [1]
return mask
def __A ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
        vocab_file = os.path.join(UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __A ( self : Dict ):
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : Union[List[str], List[AddedToken]] , UpperCamelCase__ : bool = False ):
'''simple docstring'''
return super()._add_tokens(UpperCamelCase__ , special_tokens=UpperCamelCase__ )
| 258 | 1 |
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
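# Illustrative use of these canonical parameter sets (assuming the accompanying
# PipelineTesterMixin API): a concrete pipeline test picks the sets matching its task,
#     class MyTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#         params = TEXT_TO_IMAGE_PARAMS
#         batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
# and the mixin then checks that the pipeline's __call__ accepts those argument names.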
| 92 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor, as nested Python lists, of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : str = min_seq_length
__lowerCAmelCase : int = max_seq_length
__lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
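        # Worked example with the defaults above (illustrative): the step is
        # (2000 - 400) // (7 - 1) = 266, so the batched speech inputs built below
        # grow as 400, 666, 932, ..., 1996 samples, one length per batch entry.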
__lowerCAmelCase : Any = padding_value
__lowerCAmelCase : str = sampling_rate
__lowerCAmelCase : Optional[Any] = return_attention_mask
__lowerCAmelCase : Optional[Any] = do_normalize
__lowerCAmelCase : Optional[Any] = feature_size
__lowerCAmelCase : Optional[int] = chunk_length
__lowerCAmelCase : Optional[Any] = hop_length
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]:
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ )
__lowerCAmelCase : Dict = feat_extract_first.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__lowerCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
__lowerCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
__lowerCAmelCase : List[str] = feat_extract_first.to_dict()
__lowerCAmelCase : Tuple = feat_extract_second.to_dict()
__lowerCAmelCase : Any = feat_extract_first.mel_filters
__lowerCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(A_ )
__lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
__lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
__lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
import torch
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self, num_samples):
        '''Load the first num_samples decoded audio arrays from a dummy LibriSpeech split.'''
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
        # fmt: off
        __lowerCAmelCase : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
__lowerCAmelCase : int = self._load_datasamples(1 )
__lowerCAmelCase : Any = WhisperFeatureExtractor()
__lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = self._load_datasamples(1 )[0]
__lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
__lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
| 275 | 0 |
'''
Project Euler Problem 48 (https://projecteuler.net/problem=48):
find the last ten digits of the series 1**1 + 2**2 + 3**3 + ... + 1000**1000.
'''


def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
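# Illustrative check on the first terms (not part of the original solution):
# 1**1 + 2**2 + 3**3 + 4**4 = 1 + 4 + 27 + 256 = 288. For n = 1000 the sum runs to
# thousands of digits, which is why only the last ten are kept via the string slice.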
| 227 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size for training."} )
a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a = field(
default=1_0000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
a = field(default=2E-4 , metadata={"help": "Learning rate fo training."} )
a = field(default="cosine" , metadata={"help": "Learning rate."} )
a = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
a = field(
default=a_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a = field(default=5_0000 , metadata={"help": "Maximum number of training steps."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
a = field(default=1 , metadata={"help": "Training seed."} )
a = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a = field(
default=a_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a = field(default=a_ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class EvaluationArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class HumanEvalArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
a = field(
default=a_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a = field(
default=a_ , metadata={"help": "Sample from the language model's output distribution."} )
a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
a = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class PreprocessingArguments:
"""simple docstring"""
a = field(
default=a_ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a = field(
default=10_0000 , metadata={"help": "Number of files to save per JSON output file."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a = field(
default=a_ , metadata={"help": "If True, near-duplicate samples are removed."} )
a = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class TokenizerTrainingArguments:
"""simple docstring"""
a = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(default=20_0000 , metadata={"help": "Number of examples to train tokenizer on."} )
a = field(
default=3_2768 , metadata={"help": "Number of examples to train the tokenizer on."} )
a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a = field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class PretokenizationArguments:
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a = field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class InitializationArguments:
"""simple docstring"""
a = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a = field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
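# Illustrative wiring (assuming these dataclasses keep their upstream CodeParrot
# field names, which this listing elides): each config is fed to transformers'
# HfArgumentParser, roughly
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args()
# so every `field(...)` above surfaces as a CLI flag with the given default and help.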
| 227 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
"""simple docstring"""
def __init__( self : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any]=13 ,lowerCamelCase__ : int=30 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : Tuple=32 ,lowerCamelCase__ : Union[str, Any]=5 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=37 ,lowerCamelCase__ : Union[str, Any]="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=10 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=2 ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 2
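        # Worked example with these defaults (illustrative): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length is
        # 225 + 2 = 227 with the [CLS] and distillation tokens prepended.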
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Any ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ):
UpperCAmelCase__ = DeiTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = DeiTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = DeiTModelTester(self )
UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def __lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowerCAmelCase ( self : str ):
pass
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase__ )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str]=False ):
UpperCAmelCase__ = super()._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self : List[str] ):
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
loss.backward()
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ = False
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
loss.backward()
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCAmelCase__ = problem_type['title']
UpperCAmelCase__ = problem_type['num_labels']
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCAmelCase__ = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
UpperCAmelCase__ = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
UpperCAmelCase__ = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
UpperCAmelCase__ = model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowerCAmelCase ( self : int ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = DeiTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Tuple ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
lowerCamelCase__ )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**lowerCamelCase__ )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
UpperCAmelCase__ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' ,torch_dtype=torch.floataa ,device_map='auto' )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' )
UpperCAmelCase__ = inputs.pixel_values.to(lowerCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ )
| 98 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def get_setup_file( ):
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
class snake_case ( TestCasePlus ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check ( self : Optional[Any] ,args : Union[str, Any] ):
        n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
            with patch.object(sys ,'argv' ,args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value ,0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(UpperCAmelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(UpperCAmelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(UpperCAmelCase__ )
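# A minimal sketch (illustrative, not part of the original tests) of the
# argv-patching pattern used by run_and_check above: replacing sys.argv makes
# the script's main() behave exactly as if it were launched from the shell.
if __name__ == "__main__":
    with patch.object(sys, "argv", ["run_glue_deebert.py", "--help"]):
        print(sys.argv)  # -> ['run_glue_deebert.py', '--help']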
| 98 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
class _lowerCamelCase( nn.Module ):
lowercase_ : int
lowercase_ : Tuple[int] = (16, 32, 96, 2_56)
lowercase_ : jnp.dtype = jnp.floataa
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_lowercase : Optional[int] = []
for i in range(len(self.block_out_channels) - 1):
_lowercase : List[Any] = self.block_out_channels[i]
_lowercase : str = self.block_out_channels[i + 1]
_lowercase : Union[str, Any] = nn.Conv(
lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCamelCase)
_lowercase : List[str] = nn.Conv(
lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCamelCase)
_lowercase : Tuple = blocks
_lowercase : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : int = self.conv_in(lowerCamelCase)
_lowercase : Union[str, Any] = nn.silu(lowerCamelCase)
for block in self.blocks:
_lowercase : Optional[int] = block(lowerCamelCase)
_lowercase : List[Any] = nn.silu(lowerCamelCase)
_lowercase : int = self.conv_out(lowerCamelCase)
return embedding
@flax_register_to_config
class _lowerCamelCase( nn.Module, _a, _a ):
lowercase_ : int = 32
lowercase_ : int = 4
lowercase_ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase_ : Union[bool, Tuple[bool]] = False
lowercase_ : Tuple[int] = (3_20, 6_40, 12_80, 12_80)
lowercase_ : int = 2
lowercase_ : Union[int, Tuple[int]] = 8
lowercase_ : Optional[Union[int, Tuple[int]]] = None
lowercase_ : int = 12_80
lowercase_ : float = 0.0
lowercase_ : bool = False
lowercase_ : jnp.dtype = jnp.floataa
lowercase_ : bool = True
lowercase_ : int = 0
lowercase_ : str = "rgb"
lowercase_ : Tuple[int] = (16, 32, 96, 2_56)
def UpperCamelCase ( self, lowerCamelCase) -> FrozenDict:
"""simple docstring"""
_lowercase : Any = (1, self.in_channels, self.sample_size, self.sample_size)
_lowercase : Union[str, Any] = jnp.zeros(lowerCamelCase, dtype=jnp.floataa)
_lowercase : int = jnp.ones((1,), dtype=jnp.intaa)
_lowercase : Dict = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
_lowercase : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
_lowercase : List[str] = jnp.zeros(lowerCamelCase, dtype=jnp.floataa)
_lowercase : str = jax.random.split(lowerCamelCase)
_lowercase : Tuple = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)["params"]
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[str] = self.block_out_channels
_lowercase : int = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowercase : int = self.num_attention_heads or self.attention_head_dim
# input
_lowercase : Optional[int] = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_lowercase : Tuple = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
_lowercase : int = FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype)
_lowercase : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_lowercase : Optional[Any] = self.only_cross_attention
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Optional[int] = (only_cross_attention,) * len(self.down_block_types)
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = (num_attention_heads,) * len(self.down_block_types)
# down
_lowercase : List[str] = []
_lowercase : List[str] = []
_lowercase : List[Any] = block_out_channels[0]
_lowercase : Union[str, Any] = nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase)
for i, down_block_type in enumerate(self.down_block_types):
_lowercase : Optional[int] = output_channel
_lowercase : str = block_out_channels[i]
_lowercase : Union[str, Any] = i == len(lowerCamelCase) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowercase : List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
_lowercase : str = FlaxDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(lowerCamelCase)
for _ in range(self.layers_per_block):
_lowercase : str = nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase)
if not is_final_block:
_lowercase : List[str] = nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase)
_lowercase : Optional[int] = down_blocks
_lowercase : Any = controlnet_down_blocks
# mid
_lowercase : int = block_out_channels[-1]
_lowercase : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_lowercase : Optional[int] = nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 1.0, lowerCamelCase = True, lowerCamelCase = False, ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
_lowercase : List[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_lowercase : Tuple = jnp.flip(lowerCamelCase, axis=1)
# 1. time
if not isinstance(lowerCamelCase, jnp.ndarray):
_lowercase : Dict = jnp.array([timesteps], dtype=jnp.intaa)
elif isinstance(lowerCamelCase, jnp.ndarray) and len(timesteps.shape) == 0:
_lowercase : str = timesteps.astype(dtype=jnp.floataa)
_lowercase : str = jnp.expand_dims(lowerCamelCase, 0)
_lowercase : Optional[int] = self.time_proj(lowerCamelCase)
_lowercase : Optional[int] = self.time_embedding(lowerCamelCase)
# 2. pre-process
_lowercase : Union[str, Any] = jnp.transpose(lowerCamelCase, (0, 2, 3, 1))
_lowercase : Dict = self.conv_in(lowerCamelCase)
_lowercase : Optional[Any] = jnp.transpose(lowerCamelCase, (0, 2, 3, 1))
_lowercase : int = self.controlnet_cond_embedding(lowerCamelCase)
sample += controlnet_cond
# 3. down
_lowercase : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : List[Any] = down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train)
else:
_lowercase : List[str] = down_block(lowerCamelCase, lowerCamelCase, deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
_lowercase : List[str] = self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train)
        # 5. controlnet blocks
_lowercase : List[Any] = ()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks):
_lowercase : Tuple = controlnet_block(lowerCamelCase)
controlnet_down_block_res_samples += (down_block_res_sample,)
_lowercase : Optional[Any] = controlnet_down_block_res_samples
_lowercase : Union[str, Any] = self.controlnet_mid_block(lowerCamelCase)
# 6. scaling
_lowercase : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase)
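# A self-contained sketch (assumed values, illustrative only) of the timestep
# handling in __call__ above: a 0-d timestep array is cast to float and given
# a leading batch axis before the sinusoidal projection and MLP embedding.
if __name__ == "__main__":
    _t = jnp.array(10)                                # 0-d integer timestep
    _t = jnp.expand_dims(_t.astype(jnp.float32), 0)   # shape (1,), ready to embed
    assert _t.shape == (1,)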
| 354 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowercase : List[str] = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase)
@torch.no_grad()
def __call__( self, lowerCamelCase = 1, lowerCamelCase = None, lowerCamelCase = 0.0, lowerCamelCase = 50, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size, lowerCamelCase):
_lowercase : Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowercase : Union[str, Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCamelCase)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_lowercase : str = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowerCamelCase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
_lowercase : Union[str, Any] = self.unet(lowerCamelCase, lowerCamelCase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(
lowerCamelCase, lowerCamelCase, lowerCamelCase, eta=lowerCamelCase, use_clipped_model_output=lowerCamelCase, generator=lowerCamelCase).prev_sample
_lowercase : Any = (image / 2 + 0.5).clamp(0, 1)
_lowercase : str = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : Optional[int] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase)
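# A minimal check (assumed values, not from the source) of the post-processing
# above: model outputs live in [-1, 1], so `image / 2 + 0.5` plus a clamp maps
# them into the [0, 1] range expected when converting to numpy / PIL.
if __name__ == "__main__":
    _x = torch.tensor([-1.0, 0.0, 1.0])
    assert torch.equal((_x / 2 + 0.5).clamp(0, 1), torch.tensor([0.0, 0.5, 1.0]))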
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 130 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
return choice(lowerCAmelCase__ )
def UpperCamelCase_ ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Dict = random_pivot(lowerCAmelCase__ )
# partition based on pivot
# linear time
lowerCAmelCase_ : Optional[int] = [e for e in lst if e < pivot]
lowerCAmelCase_ : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCAmelCase__ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCAmelCase__ ) < k - 1:
return kth_number(lowerCAmelCase__ , k - len(lowerCAmelCase__ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
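    # A minimal usage sketch (illustrative; the values assume distinct inputs,
    # since the partition above drops duplicates of the pivot):
    print(kth_number([3, 1, 4, 9, 5] , 2 ) )  # 2nd smallest -> 3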
| 289 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_, lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """maskformer-swin"""
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Dict=9_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[3, 6, 1_2, 2_4] , SCREAMING_SNAKE_CASE_ : List[str]=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-5 , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : str , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = image_size
lowerCAmelCase_ : Optional[Any] = patch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : List[str] = embed_dim
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = num_heads
lowerCAmelCase_ : List[str] = window_size
lowerCAmelCase_ : Any = mlp_ratio
lowerCAmelCase_ : Any = qkv_bias
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Any = use_absolute_embeddings
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
lowerCAmelCase_ : List[Any] = ['stem'] + [F"stage{idx}" for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
| 289 | 1 |
'''simple docstring'''
def binary_exponentiation(a : int , n : int , mod : int ) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p : int = 701
a : int = 10_0000_0000
b : int = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
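# Why the prints above hold (a short sketch, not part of the original file):
# by Fermat's little theorem, b ** (p - 1) % p == 1 for prime p with p not
# dividing b, so b ** (p - 2) % p is the modular inverse of b and the
# division (a / b) % p can be carried out entirely in integers.
assert binary_exponentiation(b , p - 2 , p ) == pow(b , p - 2 , p )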
| 258 |
'''simple docstring'''
def jaro_winkler(stra : str , strb : str ) -> float:
    """Jaro-Winkler similarity of two strings, a value between 0.0 and 1.0."""
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
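# Illustrative sanity checks (the values follow from the definition above
# rather than from the source): identical strings score exactly 1.0, and the
# Winkler prefix bonus can only raise, never lower, the plain Jaro score.
if __name__ == "__main__":
    assert jaro_winkler("hello" , "hello" ) == 1.0
    assert 0.0 <= jaro_winkler("hello" , "world" ) <= 1.0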
| 258 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""CLIPFeatureExtractor"""]
__snake_case = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 112 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__snake_case = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
__snake_case = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
__snake_case = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
__snake_case = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case = [0] * args.vocab_size
for k, v in counter.items():
__snake_case = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 112 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase: Tuple = logging.get_logger(__name__)
# General docstring
_lowercase: str = "RegNetConfig"
# Base docstring
_lowercase: Union[str, Any] = "facebook/regnet-y-040"
_lowercase: Optional[Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase: Dict = "facebook/regnet-y-040"
_lowercase: Union[str, Any] = "tabby, tabby cat"
_lowercase: str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 3 , lowerCamelCase_ = 1 , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" , ):
"""simple docstring"""
super().__init__()
a = nn.Convad(
lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , groups=lowerCamelCase_ , bias=lowerCamelCase_ , )
a = nn.BatchNormad(lowerCamelCase_ )
a = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.convolution(lowerCamelCase_ )
a = self.normalization(lowerCamelCase_ )
a = self.activation(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
a = config.num_channels
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
a = self.embedder(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 ):
"""simple docstring"""
super().__init__()
a = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ )
a = nn.BatchNormad(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.convolution(lowerCamelCase_ )
a = self.normalization(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = nn.AdaptiveAvgPoolad((1, 1) )
a = nn.Sequential(
nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.pooler(lowerCamelCase_ )
a = self.attention(lowerCamelCase_ )
a = hidden_state * attention
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ):
"""simple docstring"""
super().__init__()
a = in_channels != out_channels or stride != 1
a = max(1 , out_channels // config.groups_width )
a = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
a = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
a = ACTaFN[config.hidden_act]
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = hidden_state
a = self.layer(lowerCamelCase_ )
a = self.shortcut(lowerCamelCase_ )
hidden_state += residual
a = self.activation(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ):
"""simple docstring"""
super().__init__()
a = in_channels != out_channels or stride != 1
a = max(1 , out_channels // config.groups_width )
a = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
a = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
a = ACTaFN[config.hidden_act]
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = hidden_state
a = self.layer(lowerCamelCase_ )
a = self.shortcut(lowerCamelCase_ )
hidden_state += residual
a = self.activation(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , ):
"""simple docstring"""
super().__init__()
a = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
a = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , ) , *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for _ in range(depth - 1 )] , )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.layers(lowerCamelCase_ )
return hidden_state
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
a = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ ) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ):
"""simple docstring"""
a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a = hidden_states + (hidden_state,)
a = stage_module(lowerCamelCase_ )
if output_hidden_states:
a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = RegNetConfig
__A = "regnet"
__A = "pixel_values"
__A = True
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
a = value
_lowercase: str = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase: List[Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", lowerCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__(lowerCamelCase_ )
a = config
a = RegNetEmbeddings(lowerCamelCase_ )
a = RegNetEncoder(lowerCamelCase_ )
a = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ):
"""simple docstring"""
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.embedder(lowerCamelCase_ )
a = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
a = encoder_outputs[0]
a = self.pooler(lowerCamelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", lowerCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__(lowerCamelCase_ )
a = config.num_labels
a = RegNetModel(lowerCamelCase_ )
# classification head
a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ (self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ):
"""simple docstring"""
a = return_dict if return_dict is not None else self.config.use_return_dict
a = self.regnet(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
a = outputs.pooler_output if return_dict else outputs[1]
a = self.classifier(lowerCamelCase_ )
a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a = "single_label_classification"
else:
a = "multi_label_classification"
if self.config.problem_type == "regression":
a = MSELoss()
if self.num_labels == 1:
a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
a = CrossEntropyLoss()
a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a = BCEWithLogitsLoss()
a = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
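# A compact sketch (assumed tensors, illustrative only) of the problem_type
# dispatch in the classification head above: integer labels select a
# cross-entropy (single-label) loss, float multi-hot labels select BCE.
if __name__ == "__main__":
    _logits = torch.randn(2, 3)
    _single = torch.tensor([0, 2])                 # long dtype -> single-label
    print(CrossEntropyLoss()(_logits.view(-1, 3), _single.view(-1)))
    _multi = torch.randint(0, 2, (2, 3)).float()   # float multi-hot -> multi-label
    print(BCEWithLogitsLoss()(_logits, _multi))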
| 227 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = 42
__A = None
def betas_for_alpha_bar( num_diffusion_timesteps : int , max_beta : float = 0.999 , alpha_transform_type : str = "cosine" , ) -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
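# A quick illustrative check (assumed values, not from the original file): the
# cosine schedule above yields betas in (0, max_beta] whose cumulative products
# decay monotonically, which is what the scheduler below relies on.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10 )
    assert torch.all(_betas > 0 ) and torch.all(_betas <= 0.999 )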
class _lowercase ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
__A = 1
@register_to_config
def __init__(self , lowerCamelCase_ = 1000 , lowerCamelCase_ = 0.0001 , lowerCamelCase_ = 0.02 , lowerCamelCase_ = "linear" , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = 0 , lowerCamelCase_ = "epsilon" , lowerCamelCase_ = 1.0 , **lowerCamelCase_ , ):
"""simple docstring"""
if kwargs.get("set_alpha_to_one" , lowerCamelCase_ ) is not None:
a = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , lowerCamelCase_ , standard_warn=lowerCamelCase_ )
a = kwargs["set_alpha_to_one"]
if trained_betas is not None:
a = torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
a = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a = betas_for_alpha_bar(lowerCamelCase_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
a = 1.0 - self.betas
a = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
a = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
a = 1.0
# setable values
a = None
a = torch.from_numpy(np.arange(0 , lowerCamelCase_ ).copy().astype(np.intaa ) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
return sample
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
a = num_inference_steps
a = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round().copy().astype(np.intaa )
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
self.timesteps += self.config.steps_offset
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.0 , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , ):
"""simple docstring"""
a = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
a = self.alphas_cumprod[timestep]
a = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
a = model_output
elif self.config.prediction_type == "sample":
a = model_output
a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
a = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
a = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps
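# A small illustrative check (assumed values) of the timestep grid built by
# set_timesteps above: with 1000 training steps and 4 inference steps, the
# integer timesteps are [0, 250, 500, 750] before steps_offset is added.
if __name__ == "__main__":
    _steps = (np.arange(0, 4) * (1000 // 4)).round().astype(np.int64)
    print(_steps)  # [  0 250 500 750]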
| 227 | 1 |
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
while b:
__lowerCAmelCase: List[str] = b, a % b
return a
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__SCREAMING_SNAKE_CASE , a % b )
def a__ ( ) -> Tuple:
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
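    # A small companion sketch (illustrative, not from the original file): the
    # gcd also yields the least common multiple via lcm(a, b) * gcd(a, b) == a * b.
    print(f"lcm(4, 6) = {4 * 6 // euclidean_gcd(4 , 6 )}" )  # -> 12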
| 354 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__A = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return the full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
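
# Hedged usage sketch (illustrative values, not from the datasets test suite): with
# load_existing_dummy_data=False the mock manager never touches the network and
# simply rewrites each URL into a path under the dummy_data folder.
if __name__ == "__main__":
    dl_manager = MockDownloadManager("squad", None, "1.0.0", load_existing_dummy_data=False)
    print(dl_manager.download_and_extract("https://example.com/train.json"))  # dummy_data/train.json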
| 108 | 0 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d in [numerator, digit] for which 1/d contains the longest
    recurring cycle in its decimal fraction part.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
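
# Quick self-check: among d < 10, 1/7 = 0.(142857) has the longest recurring
# cycle (length 6), so the search over [1, 10] should return 7.
assert solution(1, 10) == 7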
| 199 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental prime sieve: yields primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which 2 * p_n * n first exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
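
# Sanity check of the identity behind `solution`: for odd n, the remainder of
# ((p_n - 1)**n + (p_n + 1)**n) divided by p_n**2 collapses to 2 * p_n * n.
# Here n = 5 and p_5 = 11, so the remainder should be 2 * 11 * 5 = 110.
assert ((11 - 1) ** 5 + (11 + 1) ** 5) % 11**2 == 2 * 11 * 5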
| 84 | 0 |
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
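
# Quick self-check (runs on import): quick_sort_random sorts in place over the
# half-open index range [left, right).
_data = [9, 2, 7, 4, 5, 5, 1]
quick_sort_random(_data, 0, len(_data))
assert _data == [1, 2, 4, 5, 5, 7, 9]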
| 65 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
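
# Hedged usage sketch of the pipeline exercised above; assumes a CUDA device and
# network access to download the Intel/ldm3d checkpoint. Not part of the test suite.
if __name__ == "__main__":
    demo_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
    demo_out = demo_pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
    demo_rgb, demo_depth = demo_out.rgb, demo_out.depth  # (1, 512, 512, 3) and (1, 512, 512)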
| 65 | 1 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Solve an ODE dy/dx = f(x, y) with the classic fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
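
# Example (runs on import): dy/dx = y with y(0) = 1 integrated to x = 3 at step
# h = 0.01 should land very close to e**3, since RK4 has O(h**4) global error.
_y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)
assert abs(_y[-1] - np.exp(3.0)) < 1e-6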
| 289 |
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
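
# What the warning asks callers to do instead: import the helper from the package
# root, where current accelerate releases export it.
if __name__ == "__main__":
    from accelerate import find_executable_batch_size  # noqa: F401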
| 289 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
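
# Hedged usage sketch (assumes network access to the ZinengTang/tvlt-base checkpoint);
# mirrors what test_processor above exercises. Not part of the test suite.
if __name__ == "__main__":
    demo_processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    demo_inputs = demo_processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
    print(sorted(demo_inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values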
| 138 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d in [numerator, digit] for which 1/d contains the longest
    recurring cycle in its decimal fraction part.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
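
# Cross-check of the brute-force search above (illustrative helper, not part of
# the original module): for d coprime to 10, the cycle length of 1/d equals the
# multiplicative order of 10 modulo d; 1/7 = 0.(142857) has period 6.
def cycle_length(d: int) -> int:
    remainder, order = 10 % d, 1
    while remainder != 1:
        remainder, order = remainder * 10 % d, order + 1
    return order


assert cycle_length(7) == 6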
| 138 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
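
# Hedged example (requires the installed transformers package context for the
# relative imports above): with the defaults, every 4th encoder block is sparse,
# since encoder_sparse_step = num_layers // num_sparse_encoder_layers = 12 // 3.
if __name__ == "__main__":
    demo_config = SwitchTransformersConfig()
    print(demo_config.num_experts)          # 8
    print(demo_config.encoder_sparse_step)  # 4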
| 112 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)
    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
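
# Hypothetical invocation of this conversion script (the script filename and paths
# are placeholders; the flags mirror the argparse definitions above):
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 \
#         --output_path ./music_spectrogram_diffusion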
| 112 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
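
# The guard pattern above, reduced to a standalone sketch: each optional backend is
# probed once at import time, and missing ones are swapped for dummy objects that
# only raise when actually used (`scipy` below stands in for any optional dependency).
try:
    import scipy  # noqa: F401

    _scipy_available = True
except ImportError:
    _scipy_available = False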
| 39 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)
    new_string_left = string_left
    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(diffusion_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
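
# Hypothetical invocation (the script filename and output path are placeholders;
# an official model name such as "gwf-440k" triggers a download via MODELS_MAP):
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./dance_diffusion_converted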
| 39 | 1 |