import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build the train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels
    # by the models of the transformers library.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc.
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
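# How to run (added note; the script filename is a placeholder, the flags are the
# ones defined in main() above):
#   python cross_validation.py --num_folds 3
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16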
# ===========================================================================
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
# ===========================================================================
def mean_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of a list of numbers.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Make sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ===========================================================================
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by a non-negative b using only additions, bit tests and shifts."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus without ever forming the full product."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
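# Illustrative self-check (added sketch, not part of the original module): both
# helpers are shift-and-add loops, so for small non-negative inputs their results
# should match the built-in operators.
if __name__ == "__main__":
    assert binary_multiply(3, 4) == 3 * 4
    assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5
    print("shift-and-add multiplication checks passed")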
# ===========================================================================
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # the ValueError message ends with a list literal of the offending flags
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
# ===========================================================================
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
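# Minimal usage sketch (added; assumes the class name reconstructed above): feed a
# "video" of eight random RGB frames through the processor.
if __name__ == "__main__":
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    processor = VideoMAEImageProcessor()
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)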
# ===========================================================================
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is a natural-log NLL, so the natural exponential of the
            # masked mean loss yields the per-sequence perplexity.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
# ===========================================================================
def factorial(num: int) -> int:
    """Return num factorial."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split `number` into its decimal digits and return their sum."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
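# Quick sanity check (added note): solution(10) should return 27, since
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.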
# ===========================================================================
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Return the expected number of distinct colours among `num_picks` drawn balls."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
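# Monte Carlo cross-check (added sketch, not part of the original solution):
# drawing 20 balls without replacement and counting distinct colours should
# converge to the closed-form expectation returned by solution(20).
def _simulate(trials: int = 100_000) -> float:
    import random

    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    return sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials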
# ===========================================================================
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
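# Note (added): with this pattern, importing the package stays cheap; the heavy
# submodules listed in `_import_structure` are only imported the first time an
# attribute such as `XLNetModel` is actually accessed on the module object.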
# ===========================================================================
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk

# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
# ===========================================================================
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
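# Example invocation (added note; all paths are placeholders):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin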
# ===========================================================================
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
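# Example invocation (added note; all paths are placeholders, the flags are the
# ones defined above):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json \
#       --dict_path ./dict.ltr.txt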
# ===========================================================================
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        """Comparison rule to < operator."""
        return self.key < other.key

    def __repr__(self):
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at neighbor's list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Destination vertex and weight."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex (O(V^2))."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap as the vertex queue."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
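# Minimal usage sketch (added, not part of the original module): build the path
# graph 1-2 (w=1), 2-3 (w=2), 3-4 (w=1) and read off the MST edges chosen by
# both variants; each pair is (vertex, parent) in 1-based numbering.
if __name__ == "__main__":
    graph = [Vertex(i) for i in range(4)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 3, 4, 1)
    print(prim(graph, graph[0]))            # [(2, 1), (3, 2), (4, 3)]
    print(list(prim_heap(graph, graph[0])))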
# ===========================================================================
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = (DDPMParallelScheduler,)
def A__ ( self , **lowerCAmelCase ):
UpperCAmelCase_ = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowerCAmelCase )
return config
def A__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def A__ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def A__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def A__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase )
def A__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def A__ ( self ):
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def A__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def A__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
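

# A hedged, minimal usage sketch of the custom-timesteps contract exercised above.
# `DDPMScheduler` is an assumed stand-in for the scheduler class under test; any
# diffusers scheduler that accepts `set_timesteps(timesteps=...)` behaves the same way:
# the list must be strictly descending and cannot be combined with `num_inference_steps`.
if __name__ == "__main__":
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler()
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # valid: strictly descending
    print(scheduler.timesteps)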
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 23 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in a red dress and hat, in the snowy and brightly lit night, with many bright buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
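

# A minimal usage sketch for the fast tokenizer above (assumes access to the
# Hugging Face Hub to download "distilbert-base-uncased"): single sequences are
# encoded as `[CLS] ... [SEP]` and pairs as `[CLS] A [SEP] B [SEP]`, matching the
# special-token helpers defined in the class.
if __name__ == "__main__":
    tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    encoded = tokenizer("Hello world", "How are you?")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))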
| 23 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
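

# A minimal usage sketch for `ffmpeg_read` (assumes `ffmpeg` is on PATH and that a
# file named "sample.flac" exists): decode a file's raw bytes into a float32 waveform.
def _demo_ffmpeg_read(path: str = "sample.flac", sampling_rate: int = 16000) -> np.array:
    with open(path, "rb") as f:
        return ffmpeg_read(f.read(), sampling_rate)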
def ffmpeg_microphone(sampling_rate, chunk_length_s, format_for_conversion="f32le"):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate,
    chunk_length_s,
    stream_chunk_s=None,
    stride_length_s=None,
    format_for_conversion="f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
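

# A small self-contained demonstration of `chunk_bytes_iter`: a 10-byte stream cut
# into 4-byte windows with a (1, 1)-byte stride yields overlapping chunks plus a
# final shorter remainder.
def _demo_chunk_bytes_iter():
    for item in chunk_bytes_iter(iter([b"0123456789"]), chunk_len=4, stride=(1, 1)):
        print(item["raw"], item["stride"])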
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 23 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_SLOW", default=False)
SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_REMOTE", default=False)
SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_LOCAL", default=True)
SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
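

# A usage sketch for `for_all_test_methods`: every `test*` method of the decorated
# TestCase is wrapped by the listed decorators (here the `slow` marker defined above).
@for_all_test_methods(slow)
class _ExampleDecoratedTest(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)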
class RequestWouldHangIndefinitelyError(Exception):
    '''simple docstring'''

    pass


class OfflineSimulationMode(Enum):
    '''simple docstring'''

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def snake_case__ ( ) -> Tuple:
import gc
gc.collect()
UpperCAmelCase_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def snake_case__ ( ) -> Any:
import gc
gc.collect()
UpperCAmelCase_ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    '''simple docstring'''

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def snake_case__ ( ) -> List[str]:
UpperCAmelCase_ = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
UpperCAmelCase_ = re.sub(R"^gw" , "" , lowerCAmelCase_ , 0 , re.M )
return int(lowerCAmelCase_ )
def snake_case__ ( ) -> Tuple:
UpperCAmelCase_ = 2_9500
UpperCAmelCase_ = pytest_xdist_worker_id()
return port + uniq_delta
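

# A minimal usage sketch for `execute_subprocess_async` (assumes a POSIX `echo`
# binary); the returned `_RunOutput` carries the return code and captured streams.
if __name__ == "__main__":
    result = execute_subprocess_async(["echo", "hello"])
    print(result.returncode, result.stdout)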
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
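

# A quick round-trip sketch for the normalizer above: with the initial zero mean and
# unit std, `scale` is the identity, so `unscale(scale(x))` recovers `x` exactly.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)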
| 23 | 0 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
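

# Two worked examples, checked by hand against the definition above: 3 = 0b011 and
# 5 = 0b101 share only the lowest bit, while 25 = 0b011001 and 32 = 0b100000 share none.
def _demo_binary_and():
    assert binary_and(3, 5) == "0b001"
    assert binary_and(25, 32) == "0b000000"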
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
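

# A hedged usage sketch (assumes internet access, Pillow, and the public
# "nlpconnect/vit-gpt2-image-captioning" checkpoint): the high-level `pipeline`
# factory wires this class up from a model name.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))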
| 23 | 0 |
class Node:
    '''simple docstring'''

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f'''{self.__class__.__name__}({self.name}, {self.val})'''

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    '''simple docstring'''

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
SCREAMING_SNAKE_CASE = Node("R", -1)
SCREAMING_SNAKE_CASE = Node("B", 6)
SCREAMING_SNAKE_CASE = Node("A", 3)
SCREAMING_SNAKE_CASE = Node("X", 1)
SCREAMING_SNAKE_CASE = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 709 |
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    ia, ib, ic = 0, 0, 0
    next_2 = ugly_nums[ia] * 2
    next_3 = ugly_nums[ib] * 3
    next_5 = ugly_nums[ic] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            ia += 1
            next_2 = ugly_nums[ia] * 2
        if next_num == next_3:
            ib += 1
            next_3 = ugly_nums[ib] * 3
        if next_num == next_5:
            ic += 1
            next_5 = ugly_nums[ic] * 5
    return ugly_nums[-1]
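

# The first ten ugly numbers produced by the three-pointer merge above are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so `ugly_numbers(10) == 12`.
def _demo_ugly_numbers():
    assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]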
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    '''A PyTorch-Lightning module for GLUE sequence classification.'''
    mode = "sequence-classification"
    def __init__(self, hparams):
        # Accept a plain dict as well as an argparse Namespace.
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Convert raw GLUE examples into cached feature files for train/dev."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load the cached features for `mode` and wrap them in a DataLoader."""
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle)
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ))
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none")
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S')}''')
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
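# Hedged invocation sketch (the script filename is an assumption; the flags come
# from `add_model_specific_args` above and the generic args added by
# `add_generic_args`):
#
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC --max_seq_length 128 --gpus 1 --do_predict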
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    '''Builds a tiny SwiftFormer config plus dummy inputs for the common tests.'''
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5)
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        # Also exercise the no-labels path on fresh inputs.
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''Runs the common model tests plus pipeline tests for SwiftFormer.'''
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape, torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]))
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''')
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # assumed name for the skipped common test
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    '''Slow integration test against the released swiftformer-xs checkpoint.'''
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
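# Hedged end-to-end sketch mirroring the integration test above (needs network
# access to download the "MBZUAI/swiftformer-xs" checkpoint):
#
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000)
#   print(model.config.id2label[logits.argmax(-1).item()])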
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    """Collect the shape of every tensor in a (possibly nested) tensor tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for shape `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None) -> List[Tuple[slice, ...]]:
    """Return the fewest slice tuples that exactly cover the (inclusive) index range [start, end]."""
    def reduce_edge_list(l) -> None:
        # An index only counts as an "edge" if all trailing indices are edges too.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]]))
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :]))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims) -> torch.Tensor:
    """Slice rows [flat_start, flat_end) out of `t`'s flattened batch dimensions without copying the whole tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False) -> Any:
    """Apply `layer` over `inputs` in chunks along the flattened batch dims to bound peak memory."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims))
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    '''Binary-searches the largest chunk size that runs without raising a RuntimeError (e.g. OOM).'''
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1, ac2):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: nothing cached yet, so we must tune
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
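# Added illustration (not part of the original module): a toy use of
# `chunk_layer`, assuming the utilities above behave like the originals. The
# layer doubles its input; the stitched output must match the full batch.
if __name__ == "__main__":
    def double(x):
        return {"y": x * 2}
    toy_inputs = {"x": torch.ones(4, 8, 16)}  # two batch dims (4, 8), feature dim 16
    toy_out = chunk_layer(double, toy_inputs, chunk_size=8, no_batch_dims=2)
    assert toy_out["y"].shape == (4, 8, 16) and bool((toy_out["y"] == 2).all())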
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
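# With the lazy structure above, `from transformers.models.herbert import
# HerbertTokenizerFast` resolves through `_LazyModule.__getattr__`, so the
# heavy Rust `tokenizers` backend is only imported on first use.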
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""
    def put(self, value):
        """Receives new token ids; must be implemented by subclasses."""
        raise NotImplementedError()
    def end(self):
        """Flushes any remaining text; must be implemented by subclasses."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Streams decoded text to stdout as new tokens arrive."""
    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)
    def end(self):
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text, stream_end=False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """Stores print-ready text in a queue, to be consumed from another thread."""
    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text, stream_end=False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
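# Hedged usage sketch (model and tokenizer loading elided; the variable names
# are assumptions): the iterator streamer is drained from a second thread while
# `generate()` runs.
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="")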
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return every prime <= n.

    >>> sieve(30)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Plain sieve over the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Sieve the rest of the range one sqrt(n)-sized segment at a time.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that falls inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
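# Added sanity check (not part of the original script): the segmented sieve
# agrees with naive trial division on a small range.
if __name__ == "__main__":
    assert sieve(50) == [p for p in range(2, 51) if all(p % d for d in range(2, p))]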
from collections.abc import Sequence
from queue import Queue
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = start
UpperCAmelCase_ = end
UpperCAmelCase_ = val
UpperCAmelCase_ = (start + end) // 2
UpperCAmelCase_ = left
UpperCAmelCase_ = right
def __repr__( self ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = collection
UpperCAmelCase_ = function
if self.collection:
UpperCAmelCase_ = self._build_tree(0 , len(UpperCamelCase_ ) - 1 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
self._update_tree(self.root , UpperCamelCase_ , UpperCamelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return self._query_range(self.root , UpperCamelCase_ , UpperCamelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
if start == end:
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.collection[start] )
UpperCAmelCase_ = (start + end) // 2
UpperCAmelCase_ = self._build_tree(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase_ = self._build_tree(mid + 1 , UpperCamelCase_ )
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.fn(left.val , right.val ) , UpperCamelCase_ , UpperCamelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if node.start == i and node.end == i:
UpperCAmelCase_ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
self._update_tree(node.right , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase_ = self.fn(node.left.val , node.right.val )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase_ , UpperCamelCase_ )
def A__ ( self ):
if self.root is not None:
UpperCAmelCase_ = Queue()
queue.put(self.root )
while not queue.empty():
UpperCAmelCase_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
SCREAMING_SNAKE_CASE = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    '''Output of `PriorTransformer`: the predicted CLIP image embedding.'''
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    '''A prior transformer that predicts a CLIP image embedding from text embeddings.'''
    @register_to_config
    def __init__(self, num_attention_heads=32, attention_head_dim=64, num_layers=20, embedding_dim=768, num_embeddings=77, additional_embeddings=4, dropout=0.0, time_embed_act_fn="silu", norm_in_type=None, embedding_proj_norm_type=None, encoder_hid_proj_type="linear", added_emb_type="prd", time_embed_dim=None, embedding_proj_dim=None, clip_embed_dim=None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ])
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''')
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}
        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''', child, processors)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(processor)} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'''{name}.processor'''))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''', child, processor)
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict=True):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds, dim=1)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0)
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
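# Hedged smoke-test sketch (the tiny config values are assumptions; this would
# only exercise the tensor plumbing, not a trained prior):
#
#   prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8,
#                            num_layers=1, embedding_dim=16, num_embeddings=3)
#   out = prior(
#       hidden_states=torch.randn(1, 16),          # noisy image embedding
#       timestep=1,
#       proj_embedding=torch.randn(1, 16),         # CLIP text embedding
#       encoder_hidden_states=torch.randn(1, 3, 16),
#   )
#   print(out.predicted_image_embedding.shape)     # (1, 16); clip_embed_dim defaults to embedding_dim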
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''Tokenization tests for XLM-RoBERTa (slow and fast tokenizers).'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `UpperCAmelCase_` is the big expected-encoding dict defined directly above.
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3")
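# Hedged quick check mirroring the slow tests above (requires the
# "xlm-roberta-base" checkpoint): the slow and fast tokenizers should agree.
#
#   slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
#   assert slow_tok.encode("Hello World!") == fast_tok.encode("Hello World!")  # [0, 35378, 6661, 38, 2]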
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    '''Tests for the 0/1 `knapsack.knapsack` implementation.'''
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
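# Added reference implementation (a sketch, not the module under test): a
# minimal 0/1 knapsack recursion that satisfies the expectations above.
def _reference_knapsack(cap, w, val, counter):
    if counter == 0 or cap == 0:
        return 0
    if w[counter - 1] > cap:
        # The item does not fit, so skip it.
        return _reference_knapsack(cap, w, val, counter - 1)
    # Best of taking vs. skipping the current item.
    return max(
        val[counter - 1] + _reference_knapsack(cap - w[counter - 1], w, val, counter - 1),
        _reference_knapsack(cap, w, val, counter - 1),
    )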
if __name__ == "__main__":
unittest.main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa models."""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
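# Example (added; values illustrative): the ONNX config exposes dynamic axes for
# tracing, independent of the concrete model size.
# if __name__ == "__main__":
#     config = XLMRobertaConfig(vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
#     print(XLMRobertaOnnxConfig(config).inputs)  # OrderedDict with dynamic batch/sequence axes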
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
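# Example invocation (added; the script filename is an assumption):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten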
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Convert an integer (given as a string) to a '0b'-prefixed binary string.

    >>> main("0")
    '0b0'
    >>> main("40")
    '0b101000'
    >>> main("-40")
    '-0b101000'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
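# Running these tests (added note): tests marked @slow are skipped unless the
# RUN_SLOW=1 environment variable is set, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/distilbert/test_modeling_tf_distilbert.py
# The test file path assumes the usual transformers repository layout.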
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
# Hard-coded descending timestep schedules.
TIMESTEP_SCHEDULE_1 = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
TIMESTEP_SCHEDULE_2 = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
TIMESTEP_SCHEDULE_3 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
TIMESTEP_SCHEDULE_4 = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
TIMESTEP_SCHEDULE_5 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
TIMESTEP_SCHEDULE_6 = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
TIMESTEP_SCHEDULE_7 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
TIMESTEP_SCHEDULE_8 = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
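# Added illustration: schedules like the ones above are typically produced by slicing
# a training range of 1000 steps down to N inference steps. The helper below is a
# sketch (the name and exact rounding are assumptions, not taken from the original
# file); real diffusion schedulers differ in how they space and de-duplicate steps.
import numpy as np


def make_descending_schedule(num_inference_steps: int, num_train_timesteps: int = 1000) -> list:
    # Evenly spaced floats over [0, num_train_timesteps - 1], rounded and reversed
    # so the schedule starts at the noisiest timestep.
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps).round().astype(int)
    return list(steps[::-1])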
def multiply(a: int, b: int) -> int:
    """Russian-peasant multiplication: repeated doubling and halving."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    """Same doubling scheme, but keep the accumulator reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
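# Quick sanity check (added): both helpers should agree with Python's built-in
# arithmetic for non-negative operands.
if __name__ == "__main__":
    assert multiply(20, 16) == 20 * 16
    assert multiply_mod(20, 16, 362) == (20 * 16) % 362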
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR decoder."""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
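# Example (added): a TrOCR decoder config with illustrative overrides; any values
# not shown fall back to the defaults above.
# if __name__ == "__main__":
#     cfg = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
#     print(cfg.hidden_size)  # 256, resolved through the attribute_map alias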
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class VideoMAEImageProcessor(BaseImageProcessor):
    """Video processor: per-frame resize, center-crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
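# Example (added): preprocessing a dummy 8-frame video of random uint8 images.
# The class name above follows the VideoMAE processor; it is an assumption, since
# the source obfuscated the original name.
# if __name__ == "__main__":
#     import numpy as np
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     processor = VideoMAEImageProcessor()
#     batch = processor(video, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)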
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
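# Example invocation (added; the script filename is an assumption):
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dumps \
#       --model_type bert --pytorch_checkpoint_path bert-base-uncased --compare_with_pt_model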
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
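# Quick check (added): 10! = 3628800, whose digits sum to 27.
# assert solution(10) == 27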
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """ArgumentParser that builds its arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ):
with open(Path(UpperCAmelCase__ ) , encoding="utf-8" ) as open_json_file:
UpperCAmelCase_ = json.loads(open_json_file.read() )
UpperCAmelCase_ = self.parse_dict(UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ):
UpperCAmelCase_ = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase__ ).read_text() ) , allow_extra_keys=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
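# --- usage sketch (added; not part of the original file) ----------------------
# A minimal, hedged example of the dataclass-parsing flow implemented above,
# using the upstream `transformers.HfArgumentParser`; the dataclass and the
# argv values below are hypothetical.
if __name__ == "__main__":
    import dataclasses as _dc

    from transformers import HfArgumentParser

    @_dc.dataclass
    class _DemoArgs:
        learning_rate: float = 1e-4
        output_dir: str = "out"

    (_demo,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(
        args=["--learning_rate", "3e-5", "--output_dir", "/tmp/demo"]
    )
    print(_demo)  # _DemoArgs(learning_rate=3e-05, output_dir='/tmp/demo')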
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
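# Note (added): with the `_LazyModule` indirection above, importing this
# package stays cheap — the heavy framework-specific submodules are only
# imported when an attribute is first accessed, e.g.:
#
#     from transformers.models.xlnet import XLNetConfig   # no torch/tf import yet
#     from transformers.models.xlnet import XLNetModel    # triggers the torch import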
| 23 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
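# Quick check (added): for max_perimeter = 1000 the perimeter with the most
# integer right-triangle solutions is 840 (Project Euler problem 39).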
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 700 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
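# Example invocation (added; all paths are hypothetical):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin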
| 23 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    '''simple docstring'''
    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect(graph , a , b , edge ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph , root ) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph , root ) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector() -> None:
    pass
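# Usage sketch (added; not part of the original module): build the small
# triangle graph on vertices 0..2 and print the MST found by both variants.
# Weights and the expected output comment are for this toy graph only.
def demo() -> None:
    graph = [Vertex(n ) for n in range(3 )]
    connect(graph , 1 , 2 , 15 )
    connect(graph , 1 , 3 , 12 )
    connect(graph , 2 , 3 , 13 )
    print(prim(graph , graph[0] ) )              # expected: [(2, 3), (3, 1)]
    print(list(prim_heap(graph , graph[0] ) ) )  # same edges, streamed lazily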
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
    @tooslow
    def test_batch_generation( self ):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname )
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True )
                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
                        self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname )
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True )
                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
                        self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @tooslow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
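# Sketch (added): the incremental-decoding flow the cache tests above
# exercise, outside the test harness. The checkpoint name and shapes are
# illustrative; this assumes flax is installed and enough memory for GPT-J.
#
#     model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#     past = model.init_cache(batch_size=1, max_length=32)
#     out = model(jnp.ones((1, 1), dtype="i4"),
#                 attention_mask=jnp.ones((1, 32), dtype="i4"),
#                 past_key_values=past,
#                 position_ids=jnp.zeros((1, 1), dtype="i4"))
#     past = out.past_key_values  # feed the next token with the updated cache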
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_padding_if_pad_token_set_slow( self ):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding="max_length" , max_length=30 , return_tensors="np" )
        out_s2 = tokenizer(s2 , padding=True , truncate=True , return_tensors="np" )
        out_p = tokenizer(*p , padding="max_length" , max_length=60 , return_tensors="np" )
        out_p2 = tokenizer(p2 , padding=True , truncate=True , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0] )
        self.assertFalse(0 in out_s2["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1] )
        self.assertTrue(0 in out_s2["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0] )
        self.assertFalse(0 in out_p2["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1] )
        self.assertTrue(0 in out_p2["attention_mask"][1] )
    def test_add_bos_token_slow( self ):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_s2 = tokenizer(s2 )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2 ) )
    # tokenizer has no padding token
    def test_padding_different_model_input_name( self ):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token( self ):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False , add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0 , add_special_tokens=False )
                encoded_sequence += tokenizer.encode(sequence_1 , add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0 , sequence_1 , add_special_tokens=True , return_special_tokens_mask=True , )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask ) , len(encoded_sequence_w_special ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence , filtered_sequence )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase ):
    '''simple docstring'''
    def test_serialize_deserialize_fast_opt( self ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("test_opt" )
        tokenizer = AutoTokenizer.from_pretrained("./test_opt" )
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence( self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=True )
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        # Same as above
        self.assertEqual(input_ids , [2, 250, 1345, 9, 10, 4758] )
    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def test_users_can_modify_bos( self ):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=True )
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text , )
        # We changed the bos token
        self.assertEqual(input_ids , [3_1957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("./tok" )
        tokenizer = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        input_ids = tokenizer.encode(
            text , )
        self.assertEqual(input_ids , [3_1957, 250, 1345, 9, 10, 4758] )
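# Usage sketch (added): the byte-level BPE behavior the tests above cover,
# against the real "gpt2" checkpoint (network access assumed):
#
#     tok = GPT2Tokenizer.from_pretrained("gpt2")
#     ids = tok(" lower newer")["input_ids"]
#     assert tok.decode(ids) == " lower newer"   # byte-level BPE round-trips exactly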
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 23 | 0 |
def solution(n: int = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
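# Worked micro-example (added): solution(4) = 2*3*(2//2) + 2*4*(3//2)
#                                           = 6 + 8 = 14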
if __name__ == "__main__":
print(solution())
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
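# Sketch (added): what the two sequence-pair helpers above produce. The token
# ids are arbitrary placeholders; only the special-token layout matters here.
#
#     tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     tok.build_inputs_with_special_tokens([7592], [2088])
#     # -> [CLS_id, 7592, SEP_id, 2088, SEP_id]
#     tok.create_token_type_ids_from_sequences([7592], [2088])
#     # -> [0, 0, 0, 1, 1]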
| 23 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text(text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
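# Worked micro-example (added): for text = "aab", analyze_text returns
# {'a': 2, 'b': 1} and {' a': 1, 'aa': 1, 'ab': 1}; the first-order entropy is
# -(2/3*log2(2/3) + 1/3*log2(1/3)) ≈ 0.92 bits per character.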
def main() -> None:
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes , sampling_rate: int ) -> np.array:
    ar = f'''{sampling_rate}'''
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator , chunk_len: int , stride: Tuple[int, int] , stream: bool = False ):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command , buflen: int ):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
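# Round-trip sketch (added): unscale(scale(x)) == (x - mean) / std * std + mean
# == x, so the normalization is exactly invertible (up to float precision).
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     x = torch.randn(2, 768)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)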
| 23 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name ):
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = """huggingface/label-files"""
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = """kinetics400-id2label.json"""
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = """something-something-v2-id2label.json"""
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name , config ):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def rename_key(name ):
    if "encoder." in name:
        name = name.replace("encoder." , "" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self" )
    if "attn" in name:
        name = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier" )
    return name
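# Trace example (added): rename_key("blocks.0.attn.proj.weight")
#   -> "videomae.encoder.layer.0.attn.proj.weight"                 (blocks rule)
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"    (attn.proj rule)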
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith("encoder." ):
            key = key.replace("encoder." , "" )
        if "qkv" in key:
            key_split = key.split("." )
            if key.startswith("decoder.blocks" ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = """videomae.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint(checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ):
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = """pytorch_model.bin"""
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location="cpu" )
    if "model" in files:
        state_dict = files["""model"""]
    else:
        state_dict = files["""module"""]
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors="pt" )
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        inputs["bool_masked_pos"] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
UpperCAmelCase_ = outputs.loss
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-4 )
print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase ( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def A__ ( self , prompt=None , generate_kwargs=None , max_new_tokens=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def A__ ( self , lowerCAmelCase , prompt=None ):
        image = load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def A__ ( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def A__ ( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
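# Usage sketch (the checkpoint name and caption below are illustrative assumptions,
# not taken from this file):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   captioner("path/to/image.png", max_new_tokens=20)
#   # -> [{"generated_text": "a cat sitting on a couch"}]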
| 23 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str ) -> str:
    return "".join(sorted(word ) )
def anagram(my_word: str ) -> list[str]:
    return word_by_signature[signature(my_word )]
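# Worked example (contents of words.txt assumed): signature("pots") == "opst", so
# anagram("pots") returns every word in the list sharing that signature, e.g.
# ["opts", "post", "pots", "spot", "stop", "tops"].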
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
        UpperCAmelCase_ = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase ( OnnxPipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : str = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def A__ ( self , lowerCAmelCase=0 ):
UpperCAmelCase_ = np.random.RandomState(lowerCamelCase_ )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = pipe(**lowerCamelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ = pipe(**lowerCamelCase_ )
        image_slice_1 = output.images[0, -3:, -3:, -1]
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ = pipe.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="np" , )
UpperCAmelCase_ = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["prompt_embeds"] = prompt_embeds
# forward
UpperCAmelCase_ = pipe(**lowerCamelCase_ )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = 3 * ["this is a negative prompt"]
UpperCAmelCase_ = negative_prompt
UpperCAmelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ = pipe(**lowerCamelCase_ )
        image_slice_1 = output.images[0, -3:, -3:, -1]
UpperCAmelCase_ = self.get_dummy_inputs()
UpperCAmelCase_ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ = pipe.tokenizer(
lowerCamelCase_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="np" , )
UpperCAmelCase_ = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
UpperCAmelCase_ , UpperCAmelCase_ = embeds
# forward
UpperCAmelCase_ = pipe(**lowerCamelCase_ )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def A__ ( self ):
# using the PNDM scheduler by default
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
np.random.seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ):
UpperCAmelCase_ = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = "open neural network exchange"
UpperCAmelCase_ = np.random.RandomState(0 )
UpperCAmelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ):
UpperCAmelCase_ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = "open neural network exchange"
UpperCAmelCase_ = np.random.RandomState(0 )
UpperCAmelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ):
        number_of_steps = 0
        def test_callback_fn(step , timestep , latents ) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ = "Andromeda galaxy in a bottle"
UpperCAmelCase_ = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def A__ ( self ):
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 709 |
def ugly_numbers(n: int ) -> int:
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
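# Sanity check (the ugly-number sequence, i.e. numbers whose only prime factors
# are 2, 3 and 5, starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12):
assert ugly_numbers(10 ) == 12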
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 710 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowerCAmelCase , lowerCAmelCase , 1e-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
UpperCAmelCase_ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ = int(shortest_edge / crop_pct )
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ = resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCAmelCase_ , size=(shortest_edge, shortest_edge) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCAmelCase_ , size=(shortest_edge, shortest_edge) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
UpperCAmelCase_ = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , crop_pct=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
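# Resize/crop sketch (illustrative numbers, not pulled from a checkpoint): with
# size={"shortest_edge": 224} and the default crop_pct = 224 / 256, `resize` first
# scales the shortest edge to int(224 / 0.875) = 256 and then center-crops to
# 224 x 224; for shortest_edge >= 384 it resizes ("warps") directly without cropping.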
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    triplets = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
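# Reference value (Project Euler problem 39): among integer right triangles with
# perimeter p <= 1000, p = 840 admits the most solutions, so solution() == 840.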
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 712 |
import math
def sieve(n: int ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
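# Quick check of the segmented sieve on a small bound (primes up to 30):
assert sieve(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]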
print(sieve(10**6))
| 23 | 0 |
def equated_monthly_installments(principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
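# Worked example (illustrative figures): 25000 borrowed at 12% per annum over
# 3 years gives a monthly rate of 0.01 and 36 payments, so
# equated_monthly_installments(25000, 0.12, 3) comes to roughly 830.36 per month.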
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( BaseOutput ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( ModelMixin, ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 0 |
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when a process ends up with an unexpected number of local examples."""


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main() -> None:
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        full_ds = Dataset.from_list(list(full_ds))
    ds = split_dataset_by_node(full_ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
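# Usage sketch (added): this script assumes a torch.distributed launcher that sets
# RANK and WORLD_SIZE, e.g. (script name illustrative):
#   torchrun --nproc_per_node=2 distributed_split_test.py --streaming True
# With NUM_SHARDS * NUM_ITEMS_PER_SHARD == 12 examples, each of 2 processes sees 6.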
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = XLMTokenizer
lowerCAmelCase_ : List[Any] = False
def A__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase_ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
UpperCAmelCase_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = "lower newer"
UpperCAmelCase_ = "lower newer"
return input_text, output_text
def A__ ( self ):
UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase_ = "lower"
UpperCAmelCase_ = ["low", "er</w>"]
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokens + ["<unk>"]
UpperCAmelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
@slow
def A__ ( self ):
UpperCAmelCase_ = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
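# Illustration (added): the two classes above mirror transformers' XLMRobertaConfig and
# XLMRobertaOnnxConfig; against the upstream library, typical usage would be:
#   from transformers import XLMRobertaConfig
#   config = XLMRobertaConfig()      # defaults match the keyword arguments above
#   config.num_hidden_layers         # -> 12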
| 23 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
SCREAMING_SNAKE_CASE = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
class lowerCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Tuple = ['input_ids', 'attention_mask']
lowerCAmelCase_ : Tuple = []
def __init__( self , lowerCAmelCase , lowerCAmelCase="<unk>" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<pad>" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[MASK]" , lowerCAmelCase="[CLS]" , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sep_token=lowerCAmelCase , mask_token=lowerCAmelCase , cls_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def A__ ( self ):
return self.sp_model.get_piece_size()
def A__ ( self ):
UpperCAmelCase_ = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , lowerCAmelCase ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , lowerCAmelCase ):
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return self.sp_model.piece_to_id(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.sp_model.IdToPiece(lowerCAmelCase )
return token
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = True , **lowerCAmelCase , ):
UpperCAmelCase_ = kwargs.pop("use_source_tokenizer" , lowerCAmelCase )
UpperCAmelCase_ = self.convert_ids_to_tokens(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase ) )
UpperCAmelCase_ = []
sub_texts.append(lowerCAmelCase )
else:
current_sub_text.append(lowerCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase_ = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(lowerCAmelCase ) )
else:
UpperCAmelCase_ = "".join(lowerCAmelCase )
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(lowerCAmelCase )
return clean_text
else:
return text
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
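# Usage note (added): this class mirrors transformers' sentencepiece-based
# BigBirdTokenizer; with the upstream library one would do:
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   tokenizer("Paris is the capital of France.")  # input_ids include [CLS] ... [SEP]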
| 716 |
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
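# Worked examples (added): 104 = 64 + 32 + 8, so main("104") returns "0b1101000";
# the sign is stripped before conversion, so main("-7") returns "-0b111".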
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = "canine"
def __init__( self , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_6384 , lowerCAmelCase=16 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=0 , lowerCAmelCase=0xE000 , lowerCAmelCase=0xE001 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=8 , lowerCAmelCase=1_6384 , lowerCAmelCase=128 , **lowerCAmelCase , ):
        super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = layer_norm_eps
# Character config:
UpperCAmelCase_ = downsampling_rate
UpperCAmelCase_ = upsampling_kernel_size
UpperCAmelCase_ = num_hash_functions
UpperCAmelCase_ = num_hash_buckets
UpperCAmelCase_ = local_transformer_stride
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
        UpperCAmelCase_ = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
        UpperCAmelCase_ = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = '▁'
SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
SCREAMING_SNAKE_CASE = {
'google/pegasus-xsum': 512,
}
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCamelCase ( UpperCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Any = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=103 , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCAmelCase )}, but is'''
f''' {type(lowerCAmelCase )}''' )
UpperCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCAmelCase ) , self.offset - 1 )
]
if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCAmelCase_ = additional_special_tokens_extended
else:
UpperCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase_ = mask_token_sent
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
# add special tokens to encoder dict
UpperCAmelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
@property
def A__ ( self ):
return len(self.sp_model ) + self.offset
def A__ ( self ):
UpperCAmelCase_ = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , lowerCAmelCase ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , lowerCAmelCase ):
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCAmelCase_ = self.sp_model.piece_to_id(lowerCAmelCase )
return sp_id + self.offset
def A__ ( self , lowerCAmelCase ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCAmelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
        UpperCAmelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def A__ ( self , lowerCAmelCase=False ):
return 1
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 718 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with shift-and-add (Russian-peasant style); O(log b) loop steps."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Same shift-and-add scheme, keeping every partial sum reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
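# Minimal demo (added; function names as reconstructed above, example values my own):
if __name__ == "__main__":
    print(binary_multiply(2, 8))  # 16
    print(binary_mod_multiply(3, 4, 5))  # (3 * 4) % 5 == 2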
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
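# Usage note (added): this preprocessor follows the VideoMAE-style video processors in
# transformers; a hypothetical call over a list of frame lists would look like:
#   processor = VideoMAEImageProcessor()            # upstream equivalent of this class
#   batch = processor(videos, return_tensors="pt")
#   batch["pixel_values"].shape                     # (num_videos, num_frames, 3, 224, 224)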
| 23 | 0 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
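# Worked example (added): binary_search([1, 3, 5, 7, 9], 7) returns True; the search
# space halves on each call: [1, 3, 5, 7, 9] -> [7, 9] -> [7].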
| 720 |
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(n: int = 100) -> int:
    nfact = factorial(n)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
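# Worked example (added): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
# solution(10) == 27; solution(100) == 648, the well-known Project Euler 20 answer.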
| 23 | 0 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=0.0 , lowerCAmelCase = None , lowerCAmelCase = "geglu" , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = True , lowerCAmelCase = "layer_norm" , lowerCAmelCase = False , ):
super().__init__()
UpperCAmelCase_ = only_cross_attention
UpperCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
UpperCAmelCase_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCAmelCase_ = AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase_ = AdaLayerNormZero(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = Attention(
query_dim=_SCREAMING_SNAKE_CASE , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_SCREAMING_SNAKE_CASE , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCAmelCase_ = (
AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm
else nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = Attention(
query_dim=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , upcast_attention=_SCREAMING_SNAKE_CASE , ) # is self-attn if encoder_hidden_states is none
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# 3. Feed-forward
UpperCAmelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = FeedForward(_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , activation_fn=_SCREAMING_SNAKE_CASE , final_dropout=_SCREAMING_SNAKE_CASE )
# let chunk size default to None
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = chunk_size
UpperCAmelCase_ = dim
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
UpperCAmelCase_ = self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.norma(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=hidden_states.dtype )
else:
UpperCAmelCase_ = self.norma(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCAmelCase_ = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = gate_msa.unsqueeze(1 ) * attn_output
UpperCAmelCase_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCAmelCase_ = (
self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(_SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = attn_output + hidden_states
# 3. Feed-forward
UpperCAmelCase_ = self.norma(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
UpperCAmelCase_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCAmelCase_ = torch.cat(
[self.ff(_SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(_SCREAMING_SNAKE_CASE , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
UpperCAmelCase_ = self.ff(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
UpperCAmelCase_ = gate_mlp.unsqueeze(1 ) * ff_output
UpperCAmelCase_ = ff_output + hidden_states
return hidden_states
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "geglu" , lowerCAmelCase = False , ):
super().__init__()
UpperCAmelCase_ = int(dim * mult )
UpperCAmelCase_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCAmelCase_ = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if activation_fn == "gelu-approximate":
UpperCAmelCase_ = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , approximate="tanh" )
elif activation_fn == "geglu":
UpperCAmelCase_ = GEGLU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif activation_fn == "geglu-approximate":
UpperCAmelCase_ = ApproximateGELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = nn.ModuleList([] )
# project in
self.net.append(_SCREAMING_SNAKE_CASE )
# project dropout
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
# project out
self.net.append(nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
def A__ ( self , lowerCAmelCase ):
for module in self.net:
UpperCAmelCase_ = module(_SCREAMING_SNAKE_CASE )
return hidden_states
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = "none" ):
super().__init__()
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = approximate
def A__ ( self , lowerCAmelCase ):
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.proj(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = self.gelu(_SCREAMING_SNAKE_CASE )
return hidden_states
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , dim_out * 2 )
def A__ ( self , lowerCAmelCase ):
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ , UpperCAmelCase_ = self.proj(_SCREAMING_SNAKE_CASE ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_SCREAMING_SNAKE_CASE )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.proj(_SCREAMING_SNAKE_CASE )
return x * torch.sigmoid(1.702 * x )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , embedding_dim * 2 )
UpperCAmelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE ) ) )
UpperCAmelCase_ , UpperCAmelCase_ = torch.chunk(_SCREAMING_SNAKE_CASE , 2 )
UpperCAmelCase_ = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale) + shift
return x
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = CombinedTimestepLabelEmbeddings(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , 6 * embedding_dim , bias=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE , eps=1e-6 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=_SCREAMING_SNAKE_CASE ) ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = emb.chunk(6 , dim=1 )
UpperCAmelCase_ = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 1e-5 ):
super().__init__()
UpperCAmelCase_ = num_groups
UpperCAmelCase_ = eps
if act_fn is None:
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = get_activation(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , out_dim * 2 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
if self.act:
UpperCAmelCase_ = self.act(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = self.linear(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = emb[:, :, None, None]
UpperCAmelCase_ , UpperCAmelCase_ = emb.chunk(2 , dim=1 )
UpperCAmelCase_ = F.group_norm(_SCREAMING_SNAKE_CASE , self.num_groups , eps=self.eps )
UpperCAmelCase_ = x * (1 + scale) + shift
return x
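# Note (added): the modules above mirror diffusers' BasicTransformerBlock, FeedForward,
# the GELU/GEGLU/ApproximateGELU activations and the AdaLayerNorm family. Against the
# upstream library, constructing the block would look roughly like:
#   from diffusers.models.attention import BasicTransformerBlock
#   block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)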
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 0 |
"""Longest path (by vertex count) in a DAG, via Kahn's topological ordering."""


def longest_distance(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
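# Expected output (added): 5, since the longest chain 0 -> 2 -> 5 -> 6 -> 7 visits
# five vertices.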
| 701 |
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Vertex of a weighted undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex, O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap, O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
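    # Minimal usage sketch (graph values chosen for illustration; not part of
    # the original file). Vertices get ids 0..2; `connect` uses 1-based indices.
    G = [Vertex(n) for n in range(3)]
    connect(G, 1, 2, 1)
    connect(G, 2, 3, 2)
    connect(G, 1, 3, 3)
    print(prim(G, G[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(G, G[0])))  # [(2, 1), (3, 2)]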
| 23 | 0 |
from __future__ import annotations


class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost index of `char` in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatching position in the current text window, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
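# For the sample inputs above this prints (comment added for illustration):
# Pattern found in following positions:
# [0, 3]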
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
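# Note (added for clarity): replacing the module object with a _LazyModule
# defers the heavy backend imports (sentencepiece, tokenizers, torch) until a
# symbol from the corresponding submodule is actually accessed.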
| 23 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
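    # Quick agreement check (added for illustration): both implementations
    # return sqrt(27) ≈ 5.196152422706632 for these vectors.
    assert np.isclose(
        euclidean_distance([1, 2, 3], [4, 5, 6]),
        euclidean_distance_no_np([1, 2, 3], [4, 5, 6]),
    )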
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" RetriBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # the backend normalizer is out of sync with the requested options: rebuild it
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
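# Usage sketch (checkpoint name taken from the maps above; added for illustration):
# tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
# tokenizer("a query", "a passage")  # -> input_ids with [CLS]/[SEP] inserted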
| 23 | 0 |
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # very long lock names must be hashed/truncated to fit filesystem limits
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
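# Minimal FileLock usage sketch (added for illustration):
# with FileLock("resource.lock", timeout=1):
#     ...  # critical section; a concurrent holder makes acquire raise Timeout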
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # the backend normalizer is out of sync with the requested options: rebuild it
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 23 | 0 |
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
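# Example invocation (script name and paths are placeholders, added for illustration):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./model.ckpt-best \
#     --albert_config_file ./albert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin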
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
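# Usage sketch (added for illustration):
# with open("sample.wav", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)  # mono float32 array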
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Stream raw microphone audio through an ffmpeg subprocess."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping (strided) numpy chunks."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`. Optionally adds `stride` to each chunk
    to get overlapping chunks.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
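# Illustrative behaviour (comment added, not in the original file): with
# chunk_len=4 and stride=(0, 2), feeding b"abcdefgh" yields the raws
# b"abcd", b"cdef", b"efgh" and a final tail b"gh" — successive chunks
# overlap by stride_right bytes.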
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 23 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a small LoRA-style adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # Relatively large model needed so that quantization effects are measurable
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # serializing 4-bit models is not supported (exception type assumed: NotImplementedError)
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_on_quant_type_with_config(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
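# Minimal 4-bit loading sketch mirroring what these tests exercise (model name
# is an example; requires a CUDA GPU and bitsandbytes; added for illustration):
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-560m", load_in_4bit=True, device_map="auto"
# )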
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and normalizes/denormalizes
    image embeddings.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
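# Usage sketch (added for illustration): scale() and unscale() are inverses,
# so for any embedding tensor x of shape (batch, 768):
# normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
# assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x)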
| 23 | 0 |
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class CPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
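# Note (added for clarity): debug_launcher spawns the target function across
# multiple CPU processes via torch.multiprocessing, emulating a distributed
# launch without the `accelerate launch` CLI.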
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
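# Usage sketch (model name is a public example checkpoint, added for
# illustration; not part of this file):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
# -> [{"generated_text": ...}]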
| 23 | 0 |
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
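# Expected input format for --correct_filename (one entry per line, derived
# from the `line.split(";")` above; comment added for illustration):
# path/to/test_file.py;TestClassName;test_method_name;corrected_line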
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 23 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase_ : Any = 'google/pegasus-xsum'
@cached_property
def A__ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A__ ( self ):
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A__ ( self , **lowerCAmelCase ):
UpperCAmelCase_ = self.translate_src_text(**lowerCAmelCase )
assert self.expected_text == generated_words
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
return generated_words
@slow
    def test_batch_generation( self ):
self._assert_generated_batch_equal_expected()
| 709 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number: positive integers whose only prime
    factors are 2, 3 and 5 (ugly_numbers(1) == 1)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
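# Quick sanity check: the sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so
# the 10th ugly number is 12 (an illustrative spot check, not from the
# original script).
assert ugly_numbers(10) == 12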
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
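# The module above is swapped out for a `_LazyModule`, so heavy submodules are
# imported only on first attribute access. A minimal sketch of the same idea
# using module-level __getattr__ (PEP 562); the names below are illustrative
# and not part of the transformers API:
#
#     import importlib
#
#     _SUBMODULES = {"tokenization_xlnet": ["XLNetTokenizer"]}
#
#     def __getattr__(name):
#         for submodule, symbols in _SUBMODULES.items():
#             if name in symbols:
#                 module = importlib.import_module(f".{submodule}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(name)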
| 710 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        # also check inference without labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
    def setUp( self ):
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
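            # For the defaults above (image_size=224, embed_dims=[48, 56, 112, 220])
            # the 8 feature maps therefore have spatial sizes 56, 56, 28, 28, 14,
            # 14, 7, 7: an illustrative instantiation of the formula checked in
            # the loop.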
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 23 | 0 |
SCREAMING_SNAKE_CASE = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Plain sieve over the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve the rest of the range, one segment of size sqrt(n) at a time.
    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
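# Cross-check against plain trial division on a small range; `_is_prime` is an
# illustrative helper that is not part of the original script.
def _is_prime(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]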
| 23 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
SCREAMING_SNAKE_CASE = {"bert_for_seq_generation": 512}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[int] = []
lowerCAmelCase_ : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<::::>" , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sep_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def A__ ( self ):
return self.sp_model.get_piece_size()
def A__ ( self ):
UpperCAmelCase_ = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , lowerCAmelCase ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , lowerCAmelCase ):
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return self.sp_model.piece_to_id(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.sp_model.IdToPiece(lowerCAmelCase )
return token
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
    '''simple docstring'''
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
    def post_process_latents( self , prior_latents ):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
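# Standalone illustration of the additive causal mask registered in __init__
# above (shape chosen for illustration):
#
#     m = torch.full([4, 4], -10000.0)
#     m.triu_(1)
#     # m is now 0.0 on and below the diagonal and -10000.0 strictly above it,
#     # so adding it to attention scores blocks attention to future positions.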
| 23 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
SCREAMING_SNAKE_CASE = "1"
SCREAMING_SNAKE_CASE = "0"
SCREAMING_SNAKE_CASE = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
SCREAMING_SNAKE_CASE = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
SCREAMING_SNAKE_CASE = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
SCREAMING_SNAKE_CASE = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
start_time = time.time()
max_iters = 2000
results = {}  # per-iteration outputs (dict name is a best-effort reconstruction)
for step in range(max_iters):
    results[step] = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
SCREAMING_SNAKE_CASE = {"allegro/herbert-base-cased": 514}
SCREAMING_SNAKE_CASE = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
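# Usage sketch (assumes network access to the Hugging Face Hub; the layout
# follows build_inputs_with_special_tokens above):
#
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     ids = tok("Ala ma kota")["input_ids"]
#     # single sequence: [cls_id] + token_ids + [sep_id]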
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = 'xlm-roberta'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
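# The `inputs` mapping above supplies the dynamic axes for ONNX export; a
# hedged sketch of how torch.onnx.export would consume it (model, tensors and
# file name are illustrative, not part of this module):
#
#     axes = {0: "batch", 1: "sequence"}
#     torch.onnx.export(
#         model, (input_ids, attention_mask), "xlm-roberta.onnx",
#         input_names=["input_ids", "attention_mask"],
#         dynamic_axes={"input_ids": axes, "attention_mask": axes},
#     )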
| 23 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_bert_from_pretrained( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )

                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
@slow
    def test_roberta_from_pretrained( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , RobertaConfig )

                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
@slow
    def test_bert_jax_jit( self ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
@slow
    def test_roberta_jax_jit( self ):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            _ = FlaxAutoModel.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
    def test_model_file_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
    def test_model_from_pt_suggestion( self ):
        with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model" ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 716 |
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return a signed, '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
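# Spot checks (illustrative): 35 decimal is 100011 in binary, and the sign is
# re-attached by main().
assert main("35") == "0b100011"
assert main("-2") == "-0b10"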
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = 'mgp-str'
def __init__( self , lowerCAmelCase=[32, 128] , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=27 , lowerCAmelCase=38 , lowerCAmelCase=5_0257 , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=4.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=1e-5 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=False , lowerCAmelCase=0.02 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = max_token_length
UpperCAmelCase_ = num_character_labels
UpperCAmelCase_ = num_bpe_labels
UpperCAmelCase_ = num_wordpiece_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = distilled
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = drop_rate
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = attn_drop_rate
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = output_aa_attentions
UpperCAmelCase_ = initializer_range
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self ):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )

        input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids

        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
'''simple docstring'''
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
'''simple docstring'''
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )

        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )

        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
'''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng ):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )

        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )

        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(down_block )

            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )

        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample )

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
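# Design note on the zero-initialized 1x1 convolutions above: at initialization
# every ControlNet residual is exactly zero, so plugging this model into a
# pretrained UNet leaves the UNet's behavior unchanged until training moves the
# projection weights away from zero (the core trick of the ControlNet paper).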
| 718 |
def binary_multiply(a: int, b: int) -> int:
    """Russian-peasant multiplication: compute a * b with shifts and adds."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c while keeping intermediate values below c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
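# Demo: both helpers agree with the built-in operators (operands chosen
# arbitrarily for illustration).
assert binary_multiply(13, 11) == 13 * 11
assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7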
| 23 | 0 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
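# Agreement check against the math module; the tolerance is illustrative (with
# 30 terms the series is accurate to roughly machine precision after the range
# reduction above).
from math import cos, isclose, sin

assert isclose(maclaurin_sin(10), sin(10), abs_tol=1e-9)
assert isclose(maclaurin_cos(10), cos(10), abs_tol=1e-9)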
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(videos ):
        return [[videos]]

    raise ValueError(f'''Could not make batched video from {videos}''' )
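# The helper above normalizes three input layouts (shapes illustrative,
# assuming `img` is any valid PIL/NumPy image):
#   make_batched([[img, img]]) -> [[img, img]]   # already a batch of videos
#   make_batched([img, img])   -> [[img, img]]   # a single video
#   make_batched(img)          -> [[img]]        # a single frame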
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
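if __name__ == "__main__":
    # Minimal sketch of the batching helper above: a bare frame, one clip and
    # a batch of clips all normalise to List[List[image]] (numpy frames are
    # accepted by is_valid_image).
    frame = np.zeros((224, 224, 3), dtype=np.uint8)
    clip = [frame, frame]
    batch = [clip, clip, clip]
    assert len(make_batched(frame)) == 1 and len(make_batched(frame)[0]) == 1
    assert len(make_batched(clip)) == 1 and len(make_batched(clip)[0]) == 2
    assert len(make_batched(batch)) == 3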
| 23 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class lowerCamelCase :
'''simple docstring'''
def __init__( self ):
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def A__ ( self ):
return self.head == self.tail
def A__ ( self , lowerCAmelCase ):
self.data.append(lowerCAmelCase )
UpperCAmelCase_ = self.tail + 1
def A__ ( self ):
UpperCAmelCase_ = self.data[self.head]
UpperCAmelCase_ = self.head + 1
return ret
def A__ ( self ):
return self.tail - self.head
def A__ ( self ):
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = 1
def A__ ( self ):
return self.data
def A__ ( self ):
return self.left
def A__ ( self ):
return self.right
def A__ ( self ):
return self.height
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = data
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = node
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = node
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = height
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return node.get_height()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
if a > b:
return a
return b
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> MyNode:
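    # Right rotation: the left child is promoted to subtree root, `node`
    # becomes its right child, and both heights are recomputed bottom-up.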
print("left rotation node:" , node.get_data() )
UpperCAmelCase_ = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__SCREAMING_SNAKE_CASE )
return ret
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> MyNode:
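    # Left rotation: mirror image of the right rotation; the right child is
    # promoted and `node` becomes its left child.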
print("right rotation node:" , node.get_data() )
UpperCAmelCase_ = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__SCREAMING_SNAKE_CASE )
return ret
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> MyNode:
UpperCAmelCase_ = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__SCREAMING_SNAKE_CASE ) )
return right_rotation(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> MyNode:
UpperCAmelCase_ = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__SCREAMING_SNAKE_CASE ) )
return left_rotation(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> MyNode | None:
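    # Standard AVL insert: descend to the leaf position, then rebalance on the
    # way back up (LL -> right rotation, LR -> left-right, mirrored for the
    # right-heavy cases) and refresh the cached height.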
if node is None:
return MyNode(__SCREAMING_SNAKE_CASE )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __SCREAMING_SNAKE_CASE ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
UpperCAmelCase_ = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
UpperCAmelCase_ = right_rotation(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = lr_rotation(__SCREAMING_SNAKE_CASE )
else:
node.set_right(insert_node(node.get_right() , __SCREAMING_SNAKE_CASE ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
UpperCAmelCase_ = node.get_right()
assert right_child is not None
if data < right_child.get_data():
UpperCAmelCase_ = rl_rotation(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = left_rotation(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
return node
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any:
while True:
UpperCAmelCase_ = root.get_right()
if right_child is None:
break
UpperCAmelCase_ = right_child
return root.get_data()
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any:
while True:
UpperCAmelCase_ = root.get_left()
if left_child is None:
break
UpperCAmelCase_ = left_child
return root.get_data()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> MyNode | None:
UpperCAmelCase_ = root.get_left()
UpperCAmelCase_ = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
UpperCAmelCase_ = get_left_most(__SCREAMING_SNAKE_CASE )
root.set_data(__SCREAMING_SNAKE_CASE )
root.set_right(del_node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
elif left_child is not None:
UpperCAmelCase_ = left_child
elif right_child is not None:
UpperCAmelCase_ = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if get_height(__SCREAMING_SNAKE_CASE ) - get_height(__SCREAMING_SNAKE_CASE ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
UpperCAmelCase_ = left_rotation(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = rl_rotation(__SCREAMING_SNAKE_CASE )
elif get_height(__SCREAMING_SNAKE_CASE ) - get_height(__SCREAMING_SNAKE_CASE ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
UpperCAmelCase_ = right_rotation(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = lr_rotation(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__SCREAMING_SNAKE_CASE )
return root
class lowerCamelCase :
'''simple docstring'''
def __init__( self ):
UpperCAmelCase_ = None
def A__ ( self ):
return get_height(self.root )
def A__ ( self , lowerCAmelCase ):
print("insert:" + str(lowerCAmelCase ) )
UpperCAmelCase_ = insert_node(self.root , lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
print("delete:" + str(lowerCAmelCase ) )
if self.root is None:
print("Tree is empty!" )
return
UpperCAmelCase_ = del_node(self.root , lowerCAmelCase )
    def __str__( self , ): # a level-order traversal gives a more intuitive look at the tree
UpperCAmelCase_ = ""
UpperCAmelCase_ = MyQueue()
q.push(self.root )
UpperCAmelCase_ = self.get_height()
if layer == 0:
return output
UpperCAmelCase_ = 0
while not q.is_empty():
UpperCAmelCase_ = q.pop()
UpperCAmelCase_ = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
UpperCAmelCase_ = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
UpperCAmelCase_ = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def snake_case__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
SCREAMING_SNAKE_CASE = AVLtree()
SCREAMING_SNAKE_CASE = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 720 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
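    # Spot-check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
    assert solution(10) == 27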
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
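# A hedged miniature of the lazy-import pattern used above. The real
# `_LazyModule` is richer, but the core idea is an attribute hook that defers
# each submodule import until first access, keeping the top-level import cheap.
import importlib
import types
class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure  # {submodule: [exported names]}
    def __getattr__(self, attr):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)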
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import math
from datetime import datetime, timedelta
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> datetime:
UpperCAmelCase_ = year % 19
UpperCAmelCase_ = year % 4
UpperCAmelCase_ = year % 7
UpperCAmelCase_ = math.floor(year / 100 )
UpperCAmelCase_ = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCAmelCase_ = leap_day_inhibits / 4
UpperCAmelCase_ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCAmelCase_ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 22 (the base date used below)
UpperCAmelCase_ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCAmelCase_ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__SCREAMING_SNAKE_CASE , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__SCREAMING_SNAKE_CASE , 4 , 18 )
else:
return datetime(__SCREAMING_SNAKE_CASE , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
SCREAMING_SNAKE_CASE = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
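    # Reference spot-check: Western Easter in 2000 fell on April 23.
    assert gauss_easter(2000) == datetime(2000, 4, 23)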
| 700 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
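    # Example invocation (script name and paths are placeholders):
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path mobilebert/mobilebert_variables.ckpt \
    #       --mobilebert_config_file mobilebert/config.json \
    #       --pytorch_dump_path mobilebert/pytorch_model.bin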
| 23 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
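# Shape of the outputs above, sketched for a sentence pair (A, B):
#   tokens:         [CLS] A ... [SEP] B ... [SEP]
#   token_type_ids:   0   0 ...   0   1 ...   1
# A pair costs three special tokens, and the segment ids flip after the
# first [SEP].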
| 701 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
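    # Prim's algorithm, O(V^2) variant: repeatedly extract the cheapest fringe
    # vertex with a linear scan over `q`, then relax its neighbours' keys.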
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
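    # Prim's algorithm with a binary heap: the minimum pops in O(log V), and
    # `heapify` is re-run after key updates because keys mutate in place.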
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
import os
def snake_case__ ( ) -> int:
UpperCAmelCase_ = os.path.dirname(os.path.realpath(__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , "triangle.txt" )
with open(__SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
for line in triangle:
UpperCAmelCase_ = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(__SCREAMING_SNAKE_CASE ) )
a.append(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
for j in range(len(a[i] ) ):
UpperCAmelCase_ = a[i - 1][j] if j != len(a[i - 1] ) else 0
UpperCAmelCase_ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return max(a[-1] )
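def _demo_triangle_dp() -> int:
    # Hedged miniature of the same bottom-up accumulation on an inline
    # triangle (the classic 4-row example; the best path 3+7+4+9 sums to 23).
    tri = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(tri)):
        for j in range(len(tri[i])):
            up = tri[i - 1][j] if j != len(tri[i - 1]) else 0
            up_left = tri[i - 1][j - 1] if j > 0 else 0
            tri[i][j] += max(up, up_left)
    return max(tri[-1])  # == 23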
if __name__ == "__main__":
print(solution())
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "x" , __SCREAMING_SNAKE_CASE = 10**-10 , __SCREAMING_SNAKE_CASE = 1 , ) -> complex:
UpperCAmelCase_ = symbols(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = lambdify(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = lambdify(__SCREAMING_SNAKE_CASE , diff(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ = starting_point
while True:
if diff_function(__SCREAMING_SNAKE_CASE ) != 0:
UpperCAmelCase_ = prev_guess - multiplicity * func(__SCREAMING_SNAKE_CASE ) / diff_function(
__SCREAMING_SNAKE_CASE )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCAmelCase_ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
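    # One more hedged demo: square roots fall out the same way
    print(f'''The root of x**2 - 2 = 0 is {newton_raphson("x**2 - 2", 1.5)}''')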
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
SCREAMING_SNAKE_CASE = get_logger(__name__)
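# The helpers below branch on `fsdp_plugin.state_dict_type` (hedged summary):
#   FULL_STATE_DICT    -> one consolidated `.bin`, gathered and handled by rank 0
#   LOCAL_STATE_DICT   -> one `.bin` per rank, suffixed with the rank index
#   SHARDED_STATE_DICT -> a `dist_cp` checkpoint directory written with a planner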
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> int:
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(f'''Saving model to {ckpt_dir}''' )
UpperCAmelCase_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(f'''Model saved to {ckpt_dir}''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__SCREAMING_SNAKE_CASE ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
UpperCAmelCase_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Loading model from {input_model_file}''' )
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Loading model from {input_model_file}''' )
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase_ = (
os.path.join(__SCREAMING_SNAKE_CASE , f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
UpperCAmelCase_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , planner=DefaultLoadPlanner() , )
UpperCAmelCase_ = state_dict["model"]
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase_ = FSDP.optim_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
UpperCAmelCase_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase_ = None
            # The check below should work, but currently it doesn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
UpperCAmelCase_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
UpperCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
UpperCAmelCase_ = (
os.path.join(__SCREAMING_SNAKE_CASE , f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
UpperCAmelCase_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , )
UpperCAmelCase_ = optim_state["optimizer"]
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
UpperCAmelCase_ = FSDP.optim_state_dict_to_load(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
optimizer.load_state_dict(__SCREAMING_SNAKE_CASE )
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
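# Usage sketch (hedged; fetching the published checkpoint needs network access):
#   from transformers import DistilBertTokenizerFast
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok("Hello world").input_ids   # -> [CLS] ... [SEP] ids, lower-cased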
| 23 | 0 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = abs(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = abs(__SCREAMING_SNAKE_CASE )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
return sum(int(__SCREAMING_SNAKE_CASE ) for c in str(abs(__SCREAMING_SNAKE_CASE ) ) )
def snake_case__ ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
UpperCAmelCase_ = f'''{func.__name__}({value})'''
UpperCAmelCase_ = timeit(f'''__main__.{call}''' , setup="import __main__" )
print(f'''{call:56} = {func(__SCREAMING_SNAKE_CASE )} -- {timing:.4f} seconds''' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
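    # Spot-check (all three variants must agree): 1 + 2 + 3 + 4 + 5 == 15.
    assert sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15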
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    UpperCAmelCase_ = 2**24 # 16 MB
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
SCREAMING_SNAKE_CASE = TypeVar("T")
SCREAMING_SNAKE_CASE = Union[List[T], Tuple[T, ...]]
SCREAMING_SNAKE_CASE = Union[T, List[T], Dict[str, T]]
SCREAMING_SNAKE_CASE = Union[str, bytes, os.PathLike]
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
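# Hedged note: the two transforms above are exact inverses,
#   unscale(scale(x)) == ((x - mean) / std) * std + mean == x,
# so embeddings can be normalised for one stage and restored for the next.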
| 23 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
if num <= 0:
UpperCAmelCase_ = f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [True] * (num + 1)
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , __SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
UpperCAmelCase_ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
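    # Spot-check: the primes below 25.
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]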
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
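
# Illustrative usage (a sketch: the checkpoint name is only an example, and the
# call downloads weights on first use):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # e.g. [{'generated_text': 'two birds are standing next to each other '}]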
| 23 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify that the prepared dataloader yields the expected batch sizes on each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
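
# Illustrative launch (a sketch, not part of the script): the asserts above
# assume exactly two processes, e.g.
#
#   accelerate launch --num_processes 2 <path_to_this_script>.py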
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 23 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
'''simple docstring'''
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
super().__init__()
UpperCAmelCase_ = n_token
UpperCAmelCase_ = d_embed
UpperCAmelCase_ = d_proj
UpperCAmelCase_ = cutoffs + [n_token]
UpperCAmelCase_ = [0] + self.cutoffs
UpperCAmelCase_ = div_val
UpperCAmelCase_ = self.cutoffs[0]
UpperCAmelCase_ = len(self.cutoffs ) - 1
UpperCAmelCase_ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCAmelCase_ = nn.ModuleList()
UpperCAmelCase_ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase ) ) )
else:
self.out_projs.append(lowerCAmelCase )
self.out_layers.append(nn.Linear(lowerCAmelCase , lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase_ , UpperCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase , r_idx - l_idx ) )
UpperCAmelCase_ = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
UpperCAmelCase_ = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase_ = nn.functional.linear(lowerCAmelCase , proj.t().contiguous() )
UpperCAmelCase_ = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
    def forward(self, hidden, labels=None, keep_order=False):
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase_ = hidden[..., :-1, :].contiguous()
UpperCAmelCase_ = labels[..., 1:].contiguous()
UpperCAmelCase_ = hidden.view(-1 , hidden.size(-1 ) )
UpperCAmelCase_ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
UpperCAmelCase_ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCAmelCase_ = labels != -100
UpperCAmelCase_ = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase_ = (
-nn.functional.log_softmax(lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase_ , UpperCAmelCase_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase_ , UpperCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase_ = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase_ = self.out_layers[i].weight
UpperCAmelCase_ = self.out_layers[i].bias
if i == 0:
UpperCAmelCase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase )
biases.append(lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=1 )
if labels is None:
UpperCAmelCase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase_ = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase_ = 0
UpperCAmelCase_ = [0] + self.cutoffs
for i in range(len(lowerCAmelCase ) - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase_ = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase_ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase_ = labels.index_select(0 , lowerCAmelCase ) - l_idx
UpperCAmelCase_ = head_logprob.index_select(0 , lowerCAmelCase )
UpperCAmelCase_ = hidden.index_select(0 , lowerCAmelCase )
else:
UpperCAmelCase_ = hidden
if i == 0:
if labels is not None:
UpperCAmelCase_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase_ = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=1 )
UpperCAmelCase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase_ = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
if self.n_clusters == 0:
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase_ , UpperCAmelCase_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase_ , UpperCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase_ = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase_ = self.out_layers[i].weight
UpperCAmelCase_ = self.out_layers[i].bias
if i == 0:
UpperCAmelCase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase )
biases.append(lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=1 )
UpperCAmelCase_ = [0] + self.cutoffs
for i in range(len(lowerCAmelCase ) - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase_ = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase_ = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=1 )
UpperCAmelCase_ = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase_ = logprob_i
return out
| 709 |
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number, i.e. the nth positive integer whose only
    prime factors are 2, 3 and 5.

    >>> ugly_numbers(10)
    12
    >>> ugly_numbers(1)
    1
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
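
# The three-pointer merge above advances every index whose candidate matched,
# so duplicates like 6 == 2 * 3 == 3 * 2 are emitted only once. A quick check,
# assuming the definition above:
assert [ugly_numbers(k) for k in range(1, 8)] == [1, 2, 3, 4, 5, 6, 8]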
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 710 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5)
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12)
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="SwiftFormer does not output attentions" )
    def test_attention_outputs(self):
        pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 23 | 0 |
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
SCREAMING_SNAKE_CASE = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import requests
SCREAMING_SNAKE_CASE = "YOUR API KEY"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = giphy_api_key ) -> list:
UpperCAmelCase_ = "+".join(query.split() )
UpperCAmelCase_ = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
UpperCAmelCase_ = requests.get(__SCREAMING_SNAKE_CASE ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 712 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to and including n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Regular sieve on [2, sqrt(n)]
    while start <= end:
        if temp[start]:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve the remaining range segment by segment, using the primes found above
    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that falls inside [low, high]
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j]:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
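
# Consistency check, assuming the sieve definition above: the segmented sieve
# must agree with naive trial division on a small range.
assert sieve(100) == [p for p in range(2, 101) if all(p % d for d in range(2, p))]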
print(sieve(10**6))
| 23 | 0 |
def hamming(n_element: int) -> list:
    """Return the first n_element terms of the Hamming number series, i.e. the
    ascending numbers of the form 2^i * 3^j * 5^k (starting from 1)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict=True):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 23 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two inputs: the output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Tests the nand_gate function."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
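
# Illustrative effect of the lazy structure above (assuming this file is the
# gpt_bigcode subpackage __init__): an import such as
#
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig
#
# only triggers the actual submodule import on first attribute access.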
| 23 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_attention_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCAmelCase , )
return config, input_ids, attention_mask
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def A__ ( self ):
UpperCAmelCase_ = FlaxDistilBertModelTester(self )
@slow
def A__ ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase )
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCAmelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
UpperCAmelCase_ = (1, 11, 768)
self.assertEqual(output.shape , lowerCAmelCase )
UpperCAmelCase_ = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1e-4 ) )
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 23 | 0 |
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> np.ndarray:
return vector * sigmoid(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
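# Quick numeric sketch of the two helpers above: the first is the logistic
# sigmoid, the second the sigmoid-weighted linear unit (swish / SiLU).
# Values are approximate and only illustrative:
#
#     x = np.array([-1.0, 0.0, 1.0])
#     sigmoid(x)      -> [0.2689, 0.5, 0.7311]
#     x * sigmoid(x)  -> [-0.2689, 0.0, 0.7311]   (swish)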
| 716 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
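# A minimal iterative sketch of the same conversion (assumed equivalent for
# the integer inputs the recursive version accepts); Python's built-in bin()
# is used only to cross-check the hand-rolled loop.
def to_binary(number: int) -> str:
    sign, number = ("-", -number) if number < 0 else ("", number)
    bits = ""
    while number > 0:
        bits = str(number % 2) + bits
        number //= 2
    return f"{sign}0b{bits or '0'}"

assert to_binary(35) == bin(35) == "0b100011"
assert to_binary(-2) == bin(-2) == "-0b10"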
| 23 | 0 |
import gc
import threading
import time
import psutil
import torch
class lowerCamelCase :
'''simple docstring'''
def __init__( self ):
UpperCAmelCase_ = psutil.Process()
UpperCAmelCase_ = False
def A__ ( self ):
UpperCAmelCase_ = -1
while True:
UpperCAmelCase_ = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def A__ ( self ):
UpperCAmelCase_ = True
UpperCAmelCase_ = threading.Thread(target=self.peak_monitor )
UpperCAmelCase_ = True
self.thread.start()
def A__ ( self ):
UpperCAmelCase_ = False
self.thread.join()
return self.cpu_memory_peak
SCREAMING_SNAKE_CASE = PeakCPUMemory()
def snake_case__ ( ) -> List[Any]:
# Time
UpperCAmelCase_ = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = torch.cuda.memory_allocated(__SCREAMING_SNAKE_CASE )
torch.cuda.reset_peak_memory_stats()
return measures
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
# Time
UpperCAmelCase_ = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
UpperCAmelCase_ = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = (torch.cuda.memory_allocated(__SCREAMING_SNAKE_CASE ) - start_measures[str(__SCREAMING_SNAKE_CASE )]) / 2**20
UpperCAmelCase_ = (torch.cuda.max_memory_allocated(__SCREAMING_SNAKE_CASE ) - start_measures[str(__SCREAMING_SNAKE_CASE )]) / 2**20
return measures
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
print(f'''{description}:''' )
print(f'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(f'''- GPU {i} allocated: {measures[str(__SCREAMING_SNAKE_CASE )]:.2f}MiB''' )
UpperCAmelCase_ = measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
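# Hedged usage sketch of the three helpers above (the names below are
# hypothetical, since this dump mangles all of them to `snake_case__`):
#
#     start = start_measure()            # snapshot time, CPU RSS, per-GPU memory
#     train_one_epoch(model)             # ... the code being profiled ...
#     measures = end_measure(start)      # deltas in seconds and MiB
#     log_measures(measures, "epoch 0")  # pretty-print per-device peaks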
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
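# loss is the mean per-token cross-entropy, so multiplying by the target
# length and negating recovers the summed log-likelihood scale that the
# expected mesh-tensorflow score below is expressed in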
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = 'trocr'
lowerCAmelCase_ : Union[str, Any] = ['past_key_values']
lowerCAmelCase_ : List[Any] = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , lowerCAmelCase=5_0265 , lowerCAmelCase=1024 , lowerCAmelCase=12 , lowerCAmelCase=16 , lowerCAmelCase=4096 , lowerCAmelCase="gelu" , lowerCAmelCase=512 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , **lowerCAmelCase , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = d_model
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = init_std
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = scale_embedding
UpperCAmelCase_ = use_learned_position_embeddings
UpperCAmelCase_ = layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 718 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
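# A minimal self-contained sketch of the same doubling idea ("Russian
# peasant" / binary multiplication), assuming non-negative operands; the
# modular variant above differs only in reducing each addition mod c.
def peasant_multiply(a: int, b: int) -> int:
    result = 0
    while b > 0:
        if b & 1:  # lowest bit of b set: add the current multiple of a
            result += a
        a += a     # double a
        b >>= 1    # halve b
    return result

assert peasant_multiply(13, 11) == 143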
| 23 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def A__ ( lowerCAmelCase ):
raise NotImplementedError()
@abstractmethod
def A__ ( self ):
raise NotImplementedError()
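# Hedged note: in the un-mangled transformers source this ABC is
# `BaseTransformersCLICommand`. The static abstract hook receives an
# `ArgumentParser` and registers the command's sub-parser; the instance
# abstract hook (`run`) executes the command. Both are mangled to `A__` here,
# so the second definition shadows the first.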
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
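# e.g. a single image -> [[image]] (one video of one frame), a flat list of
# frames -> [frames] (one video), and a list of videos is returned unchanged.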
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
class lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ['flax']
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(self , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
@classmethod
def A__ ( cls , *lowerCAmelCase , **lowerCAmelCase ):
requires_backends(cls , ["flax"] )
| 720 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
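# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so the pipeline above yields 27 for input 10; Project Euler problem 20 asks
# for the same digit sum of 100!.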
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : str = 'nat'
lowerCAmelCase_ : Optional[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=64 , lowerCAmelCase=[3, 4, 6, 5] , lowerCAmelCase=[2, 4, 8, 16] , lowerCAmelCase=7 , lowerCAmelCase=3.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=0.02 , lowerCAmelCase=1e-5 , lowerCAmelCase=0.0 , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(lowerCAmelCase )
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(lowerCAmelCase ) - 1) )
UpperCAmelCase_ = layer_scale_init_value
UpperCAmelCase_ = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names )
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
SCREAMING_SNAKE_CASE = "Input must be a string of 8 numbers plus letter"
SCREAMING_SNAKE_CASE = "TRWAGMYFPDXBNJZSQVHLCKE"
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = f'''Expected string as input, found {type(__SCREAMING_SNAKE_CASE ).__name__}'''
raise TypeError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = spanish_id.replace("-" , "" ).upper()
if len(__SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(__SCREAMING_SNAKE_CASE )
try:
UpperCAmelCase_ = int(spanish_id_clean[0:8] )
UpperCAmelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(__SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
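# Worked example: "12345678Z" validates because 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z"; "12345678A" fails the same check, and hyphens
# are stripped first, so "12345678-Z" also validates.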
| 700 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
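# Hedged CLI sketch (the script name and paths below are hypothetical):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin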
| 23 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = SwinConfig(image_size=192 )
if "base" in model_name:
UpperCAmelCase_ = 6
UpperCAmelCase_ = 128
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase_ = 12
UpperCAmelCase_ = 192
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
UpperCAmelCase_ = window_size
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = num_heads
return config
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if "encoder.mask_token" in name:
UpperCAmelCase_ = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase_ = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
UpperCAmelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
UpperCAmelCase_ = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCAmelCase_ = "swin." + name
return name
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = int(key_split[2] )
UpperCAmelCase_ = int(key_split[4] )
UpperCAmelCase_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[
:dim
]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[
-dim:
]
else:
UpperCAmelCase_ = val
return orig_state_dict
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
UpperCAmelCase_ = get_swin_config(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = SwinForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = ViTImageProcessor(size={"height": 192, "width": 192} )
UpperCAmelCase_ = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
UpperCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ = model(**__SCREAMING_SNAKE_CASE ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 701 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
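# A minimal self-contained sketch of Prim's algorithm over a plain adjacency
# dict, independent of the mangled classes above (assumed input: a connected,
# undirected, weighted graph):
import heapq

def prim_mst(adj: dict) -> list:
    """Return MST edges as (u, v, weight) tuples, starting from an arbitrary node."""
    start = next(iter(adj))
    visited = {start}
    heap = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(heap)
    mst = []
    while heap and len(visited) < len(adj):
        w, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, w))
        for nxt, nw in adj[v]:
            if nxt not in visited:
                heapq.heappush(heap, (nw, v, nxt))
    return mst

# square with one diagonal: MST weight is 1 + 2 + 1 = 4
example = {
    "a": [("b", 1), ("c", 4)],
    "b": [("a", 1), ("c", 2), ("d", 5)],
    "c": [("a", 4), ("b", 2), ("d", 1)],
    "d": [("b", 5), ("c", 1)],
}
assert sum(w for _, _, w in prim_mst(example)) == 4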
| 23 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = torch.device("cpu")
def snake_case__ ( ) -> Optional[int]:
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = dct.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = []
for k in state_dict.keys():
UpperCAmelCase_ = k
if ".pwconv" in k:
UpperCAmelCase_ = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
UpperCAmelCase_ = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
UpperCAmelCase_ = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
UpperCAmelCase_ = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
UpperCAmelCase_ = k_new.split("." )
if ls[2].isdigit():
UpperCAmelCase_ = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
UpperCAmelCase_ = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ = 1000
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase_ = [3, 3, 6, 4]
UpperCAmelCase_ = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase_ = [3, 3, 9, 6]
UpperCAmelCase_ = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase_ = [4, 3, 10, 5]
UpperCAmelCase_ = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase_ = [4, 4, 12, 6]
UpperCAmelCase_ = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location="cpu" , check_hash=__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" )
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = create_rename_keys(__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load HuggingFace model
UpperCAmelCase_ = SwiftFormerForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
# prepare test inputs
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = ViTImageProcessor.from_pretrained("preprocessor_config" )
UpperCAmelCase_ = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
# compare outputs from both models
UpperCAmelCase_ = get_expected_output(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __SCREAMING_SNAKE_CASE , atol=1E-3 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ = [1]
for i in range(2 , __SCREAMING_SNAKE_CASE ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCAmelCase_ = []
UpperCAmelCase_ = list(range(__SCREAMING_SNAKE_CASE ) )
# Find permutation
while factorials:
UpperCAmelCase_ = factorials.pop()
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
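# Worked example for n=3, k=3: factorials builds [1, 2]; divmod(3, 2) selects
# index 1 of [0, 1, 2] (value 1), divmod(1, 1) selects index 1 of the
# remaining [0, 2] (value 2), and the leftover 0 is appended, yielding
# [1, 2, 0], the permutation of rank 3 (0-indexed) in lexicographic order.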
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
lowerCAmelCase_ : Dict = TaTokenizer
lowerCAmelCase_ : List[int] = []
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=100 , lowerCAmelCase=None , **lowerCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [f'''<extra_id_{i}>''' for i in range(lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ = len(set(filter(lambda lowerCAmelCase : bool("extra_id_" in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
UpperCAmelCase_ = extra_ids
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase , )
return max_model_length
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
copyfile(self.vocab_file , lowerCAmelCase )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
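
# A brief usage sketch of the sentinel helpers above (illustrative, not from the
# source: assumes `transformers` is installed and the "t5-small" checkpoint is
# reachable):
#
#   from transformers import T5TokenizerFast
#   tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#   sorted(tokenizer.get_sentinel_tokens())[:2]   # ['<extra_id_0>', '<extra_id_1>']
#   tokenizer.get_sentinel_token_ids()[:2]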
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast DistilBERT tokenizer, backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the backend normalizer was built with different casing/accent options,
        # rebuild it so that it matches the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
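
# A brief usage sketch (illustrative; assumes `transformers` is installed and
# the "distilbert-base-uncased" checkpoint is reachable):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok.build_inputs_with_special_tokens([7592], [2088])
#   # -> [cls_id, 7592, sep_id, 2088, sep_id]
#   tok.create_token_type_ids_from_sequences([7592], [2088])
#   # -> [0, 0, 0, 1, 1]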
| 23 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(d) for 2 <= d <= limit, computed with a sieve.

    `phi` is initialised to `i - 1` (the totient of a prime); whenever
    `phi[i] == i - 1` still holds when the loop reaches `i`, `i` is prime, and
    the factor `(1 - 1/i)` is applied to every multiple of `i`.

    >>> solution(8)
    21
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
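
# Sanity check (illustrative): for limit = 8 the totients are
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sum to 21 -- the number of reduced
# proper fractions n/d with d <= 8 (Project Euler problem 72).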
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload with ffmpeg into a mono float32 numpy array."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
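
# A brief usage sketch (illustrative; assumes ffmpeg is on PATH and a local
# "sample.flac" exists):
#
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16000)
#   # `audio` is a mono float32 array resampled to 16 kHz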
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes through ffmpeg in chunks of `chunk_length_s` seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks with stride metadata, skipping chunks when the consumer falls behind."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks of `chunk_len` bytes, overlapping by `stride` (left, right)."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
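
# Illustrative behaviour of chunk_bytes_iter (values chosen for clarity, not
# taken from the source):
#
#   list(chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(0, 1)))
#   # -> [{'raw': b'abcd', 'stride': (0, 1)},
#   #     {'raw': b'defg', 'stride': (0, 1)},
#   #     {'raw': b'gh', 'stride': (0, 0)}]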
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: spawn ffmpeg and yield raw stdout reads of at most `buflen` bytes."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 23 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    """Configuration class for I-BERT, an integer-only quantized BERT/RoBERTa variant."""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
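
# A brief usage sketch (illustrative; `quant_mode=True` selects the
# integer-only path, `force_dequant` can disable it for specific ops):
#
#   config = IBertConfig(quant_mode=True)
#   onnx_config = IBertOnnxConfig(config)   # assumes the default "default" task
#   onnx_config.inputs                      # OrderedDict of dynamic axes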
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the CLIP embedding mean/std used to normalize image embeddings for Stable unCLIP."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
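
# A brief round-trip sketch (illustrative): with the default zero mean and unit
# std, scale()/unscale() are inverses.
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)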
| 23 | 0 |