"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase__ : List[Any] = 16
lowerCamelCase__ : List[str] = 32
def __A ( a_ : Dict , a_ : Optional[Any] , a_ : str , a_ : List[Any] , a_ : str = 16 )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE : List[str] = DatasetDict(
{
'''train''': dataset['''train'''].select(_lowercase ),
'''validation''': dataset['''train'''].select(_lowercase ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(a_ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Dict = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(a_ : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : str = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Dict = 8
else:
SCREAMING_SNAKE_CASE : Dict = None
return tokenizer.pad(
_lowercase , padding='''longest''' , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : List[str] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets['''test'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def __A ( a_ : Tuple , a_ : Optional[int] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
# Download the dataset
SCREAMING_SNAKE_CASE : Any = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
SCREAMING_SNAKE_CASE : int = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Dict = config['''lr''']
SCREAMING_SNAKE_CASE : int = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE : List[str] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE : str = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE : Tuple = MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
SCREAMING_SNAKE_CASE : Dict = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : List[str] = AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=1_00 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowercase )
SCREAMING_SNAKE_CASE : int = outputs.loss
SCREAMING_SNAKE_CASE : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE : List[Any] = []
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**_lowercase )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE : str = torch.cat(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE : List[str] = torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Optional[int] = metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print('''Average test metrics from all folds:''' , _lowercase )
def __A ( )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_lowercase , default=_lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=_lowercase , default=3 , help='''The number of splits to perform across the dataset''' )
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : List[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
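# --- Illustrative notes (not part of the original script) ---------------------
# The fold averaging at the end of `training_function` is a soft-voting
# ensemble: per-fold logits are stacked, summed, divided by the fold count and
# argmax'd. A minimal sketch with made-up shapes:
#
#   >>> import torch
#   >>> fold_logits = [torch.randn(8, 2) for _ in range(3)]  # 3 folds, 8 examples, 2 classes
#   >>> torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1).shape
#   torch.Size([8])
#
# The script itself is meant to be launched through the accelerate CLI, e.g.
# `accelerate launch cross_validation.py --num_folds 5` (command shown as an
# example; see the examples README linked above for the full matrix of modes).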
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
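# Illustrative usage (derived from the code above, not from the original file):
# the defaults give the Swin-T layout, and `hidden_size` is the channel width
# after the last stage, int(embed_dim * 2 ** (len(depths) - 1)).
#
#   >>> config = MaskFormerSwinConfig()
#   >>> config.hidden_size
#   768
#   >>> config.stage_names
#   ['stem', 'stage1', 'stage2', 'stage3', 'stage4']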
"""simple docstring"""
def __A ( a_ : int , a_ : float , a_ : float )-> Union[str, Any]:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def __A ( a_ : float , a_ : float , a_ : float )-> Optional[int]:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __A ( a_ : float , a_ : float , a_ : float )-> int:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __A ( a_ : float , a_ : float , a_ : float )-> Dict:
'''simple docstring'''
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
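# Worked example (values chosen for illustration): 3 mol of an ideal gas at
# 300 K in a 0.82 L vessel exerts P = nRT / V = (3 * 0.0821 * 300) / 0.82
# ≈ 90 atm, which is exactly what `moles_to_pressure(0.82, 3, 300)` returns.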
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
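    # Worked out by hand from the edge list above (for illustration): the
    # shortest 1 -> 4 path is 1 -> 3 -> 4 (5 + 6 = 11) and the shortest
    # 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16), so the two prints above
    # should output 11 and 16.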
"""simple docstring"""
import inspect
import unittest
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE : Tuple = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE : Dict = """k-diffusion"""
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE : List[Any] = """invisible-watermark"""
assert backend in deps, f"{backend} is not in the deps table!"
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase__ )
# Let's go
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
if not hasattr(lowerCAmelCase__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE : Optional[int] = args.func(lowerCAmelCase__ )
service.run()
if __name__ == "__main__":
main()
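# Illustrative invocation: `env` is the only subcommand registered above, so
#
#   $ diffusers-cli env
#
# prints environment information (package versions etc.) useful for bug reports.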
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
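# A minimal usage sketch (not from the original file; the model ids are real
# Hub checkpoints but chosen here purely for illustration):
#
#   from diffusers import ControlNetModel
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#
#   multi.save_pretrained("./my_multi/controlnet")
#   # -> writes ./my_multi/controlnet and ./my_multi/controlnet_1
#   multi = MultiControlNetModel.from_pretrained("./my_multi/controlnet")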
"""simple docstring"""
def __A ( a_ : int = 50 )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
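def _brute_force_check(length: int) -> int:
    """
    Independent cross-check of `solution` (added for illustration, not part of
    the original solution): count the tilings for each single tile size with a
    direct recursion and subtract the all-black row.

    >>> all(_brute_force_check(n) == solution(n) for n in range(2, 12))
    True
    """

    def ways(n: int, size: int) -> int:
        # rows of length n built from black squares and `size`-long tiles:
        # either the first cell is black, or a tile starts at the first cell
        if n < size:
            return 1
        return ways(n - 1, size) + ways(n - size, size)

    return sum(ways(length, size) - 1 for size in (2, 3, 4))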
if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : Optional[Any] = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ : Optional[Any] = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
# Just some logs to know what the algorithms is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
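# Quick worked example of the fitness function above (checked by hand, shown
# for illustration): "Helxo Worlx" matches "Hello World" at 9 of its 11
# positions, so evaluate("Helxo Worlx", "Hello World") returns
# ('Helxo Worlx', 9.0).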
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Whether tp freeze the encoder."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCamelCase = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=1_42 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
UpperCamelCase = field(
default=1_42 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Source language id for translation."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Target language id for translation."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """# num_beams to use for evaluation."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def __A ( a_ : str , a_ : List[str] , a_ : List[Any] )-> Tuple:
'''simple docstring'''
logger.info(F"***** {split} metrics *****" )
for key in sorted(metrics.keys() ):
logger.info(F" {key} = {metrics[key]}" )
save_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , F"{split}_results.json" ) )
def __A ( )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses()
check_output_dir(SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Dict = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE : str = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(SCREAMING_SNAKE_CASE_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset
# Get datasets
SCREAMING_SNAKE_CASE : Dict = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = (
build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , data_collator=SeqaSeqDataCollator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE : str = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
SCREAMING_SNAKE_CASE : str = train_result.metrics
SCREAMING_SNAKE_CASE : List[str] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate(metric_key_prefix='''val''' )
SCREAMING_SNAKE_CASE : List[str] = data_args.n_val
SCREAMING_SNAKE_CASE : Dict = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE_ , metric_key_prefix='''test''' )
SCREAMING_SNAKE_CASE : Dict = test_output.metrics
SCREAMING_SNAKE_CASE : Dict = data_args.n_test
if trainer.is_world_process_zero():
SCREAMING_SNAKE_CASE : Tuple = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
if training_args.predict_with_generate:
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = lmap(str.strip , SCREAMING_SNAKE_CASE_ )
write_txt_file(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def __A ( a_ : Optional[Any] )-> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
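# Illustrative launch (all paths and the model name are placeholders, not from
# the original file):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir ./my_seq2seq_data \
#       --output_dir ./output \
#       --do_train --do_eval --predict_with_generate \
#       --task translation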
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
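# Illustrative end-to-end invocation (the data paths are placeholders, not
# from the original file):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5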
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""input_features""", """is_longer"""]
def __init__( self :Any , lowerCamelCase_ :Union[str, Any]=64 , lowerCamelCase_ :int=4_80_00 , lowerCamelCase_ :int=4_80 , lowerCamelCase_ :Any=10 , lowerCamelCase_ :Union[str, Any]=10_24 , lowerCamelCase_ :List[Any]=0.0 , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :float = 0 , lowerCamelCase_ :float = 1_40_00 , lowerCamelCase_ :int = None , lowerCamelCase_ :str = "fusion" , lowerCamelCase_ :str = "repeatpad" , **lowerCamelCase_ :Any , ) -> int:
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = top_db
SCREAMING_SNAKE_CASE : Dict = truncation
SCREAMING_SNAKE_CASE : Dict = padding
SCREAMING_SNAKE_CASE : List[str] = fft_window_size
SCREAMING_SNAKE_CASE : Any = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = hop_length
SCREAMING_SNAKE_CASE : Optional[int] = max_length_s
SCREAMING_SNAKE_CASE : Any = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : Optional[Any] = sampling_rate
SCREAMING_SNAKE_CASE : Any = frequency_min
SCREAMING_SNAKE_CASE : Dict = frequency_max
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :np.array , lowerCamelCase_ :Optional[np.array] = None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram(
lowerCamelCase_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Optional[int] = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Dict = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : Tuple = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : Optional[int] = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
lowerCamelCase_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :np.array , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] ) -> Dict:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ ) - max_length
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Union[str, Any] = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Dict = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : Union[str, Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : List[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Any = False
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Optional[Any] = int(max_length / len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : str = int(max_length / len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : Optional[int] = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters )
SCREAMING_SNAKE_CASE : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : str = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self :Union[str, Any] , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :str = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , **lowerCamelCase_ :Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : Any = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : str = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Optional[Any] = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : Optional[Any] = [
self._get_input_mel(lowerCamelCase_ , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Any = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase_ )
is_longer.append(lowerCamelCase_ )
if truncation == "fusion" and sum(lowerCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : str = np.random.randint(0 , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = True
if isinstance(input_mel[0] , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : str = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : str = {'input_features': input_mel, 'is_longer': is_longer}
SCREAMING_SNAKE_CASE : Dict = BatchFeature(lowerCamelCase_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Tuple = input_features.convert_to_tensors(lowerCamelCase_ )
return input_features
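# --- Hedged usage sketch (editor's addition) ----------------------------------
# The class above appears to mirror transformers' ClapFeatureExtractor; against
# that upstream API a minimal call looks like this. The random waveform and the
# printed shape are illustrative assumptions, not taken from this file.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapFeatureExtractor

    extractor = ClapFeatureExtractor()  # defaults: 48 kHz audio, "fusion" truncation
    waveform = np.random.randn(48_000).astype(np.float32)  # ~1 second of fake audio
    batch = extractor(waveform, sampling_rate=48_000, return_tensors="np")
    print(batch["input_features"].shape)  # fused log-mel stack, e.g. (1, 4, frames, 64)
    print(batch["is_longer"])             # flags clips longer than the 10 s maximum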
| 702 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( PreTrainedTokenizer ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self :int , vocab_file :Optional[int] , unk_token :Optional[int]="[GO]" , bos_token :int="[GO]" , eos_token :str="[s]" , pad_token :Dict="[GO]" , **kwargs :List[str] ) -> Tuple:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
    def __lowerCAmelCase ( self :List[str] , text :Optional[Any] ) -> List[str]:
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
    def __lowerCAmelCase ( self :Optional[int] , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        return (vocab_file,)
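# --- Hedged usage sketch (editor's addition) ----------------------------------
# Assuming the class above tracks transformers' MgpstrTokenizer, tokenization is
# a plain character-level vocabulary lookup; the checkpoint name comes from the
# pretrained map above.
if __name__ == "__main__":
    from transformers import MgpstrTokenizer

    tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    print(tokenizer.tokenize("ticket"))      # -> ['t', 'i', 'c', 'k', 'e', 't']
    print(tokenizer("ticket")["input_ids"])  # one vocabulary id per character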
| 18 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase__ : Dict = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ : str = "MobileNetV1Config"
# Base docstring
lowerCamelCase__ : Dict = "google/mobilenet_v1_1.0_224"
lowerCamelCase__ : Union[str, Any] = [1, 1024, 7, 7]
# Image classification docstring
lowerCamelCase__ : str = "google/mobilenet_v1_1.0_224"
lowerCamelCase__ : List[str] = "tabby, tabby cat"
lowerCamelCase__ : List[str] = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __A ( a_ : int , a_ : Optional[int] , a_ : str=None )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE : str = model
SCREAMING_SNAKE_CASE : List[Any] = 'MobilenetV1/Conv2d_0/'
SCREAMING_SNAKE_CASE : Optional[Any] = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE : Optional[int] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE : int = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE : int = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE : Optional[Any] = i + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = i * 2
SCREAMING_SNAKE_CASE : Optional[Any] = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE : int = pointer.convolution.weight
SCREAMING_SNAKE_CASE : List[Any] = pointer.normalization.bias
SCREAMING_SNAKE_CASE : Any = pointer.normalization.weight
SCREAMING_SNAKE_CASE : Tuple = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE : str = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE : str = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE : Dict = pointer.convolution.weight
SCREAMING_SNAKE_CASE : List[str] = pointer.normalization.bias
SCREAMING_SNAKE_CASE : Optional[int] = pointer.normalization.weight
SCREAMING_SNAKE_CASE : Dict = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : str = pointer.normalization.running_var
if isinstance(__lowercase , __lowercase ):
SCREAMING_SNAKE_CASE : List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
SCREAMING_SNAKE_CASE : Tuple = model.classifier.weight
SCREAMING_SNAKE_CASE : Dict = model.classifier.bias
return tf_to_pt_map
def __A ( a_ : Tuple , a_ : Tuple , a_ : Dict )-> List[Any]:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE : int = tf.train.list_variables(__lowercase )
SCREAMING_SNAKE_CASE : Any = {}
for name, shape in init_vars:
logger.info(F"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE : Optional[int] = tf.train.load_variable(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE : int = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE : Optional[int] = _build_tf_to_pytorch_map(__lowercase , __lowercase , __lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"Importing {name}" )
if name not in tf_weights:
logger.info(F"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE : Tuple = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.transpose(__lowercase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE : str = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE : Dict = np.transpose(__lowercase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(__lowercase )
tf_weights.pop(__lowercase , __lowercase )
tf_weights.pop(name + '''/RMSProp''' , __lowercase )
tf_weights.pop(name + '''/RMSProp_1''' , __lowercase )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , __lowercase )
logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def __A ( a_ : torch.Tensor , a_ : nn.Convad )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = features.shape[-2:]
SCREAMING_SNAKE_CASE : str = conv_layer.stride
SCREAMING_SNAKE_CASE : Tuple = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE : Tuple = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE : Any = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE : List[Any] = pad_along_width // 2
SCREAMING_SNAKE_CASE : List[str] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE : Optional[int] = pad_along_height // 2
SCREAMING_SNAKE_CASE : Union[str, Any] = pad_along_height - pad_top
SCREAMING_SNAKE_CASE : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowercase , __lowercase , '''constant''' , 0.0 )
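# Editor's note (hedged): the function above reproduces TensorFlow "SAME"
# padding. Worked numbers, purely illustrative: with in_height=7, stride=2,
# kernel=3 we get 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2,
# split as pad_top = 2 // 2 = 1 and pad_bottom = 2 - 1 = 1.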
class lowercase__( nn.Module ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Any = 1 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :List[str] = False , lowerCamelCase_ :Dict = True , lowerCamelCase_ :Union[str, Any] = True , ) -> Dict:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
SCREAMING_SNAKE_CASE : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Convad(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=lowerCamelCase_ , groups=lowerCamelCase_ , bias=lowerCamelCase_ , padding_mode='''zeros''' , )
if use_normalization:
SCREAMING_SNAKE_CASE : Any = nn.BatchNormad(
num_features=lowerCamelCase_ , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=lowerCamelCase_ , track_running_stats=lowerCamelCase_ , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if use_activation:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE : Optional[int] = config.hidden_act
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
if self.config.tf_padding:
SCREAMING_SNAKE_CASE : List[str] = apply_tf_padding(lowerCamelCase_ , self.convolution )
SCREAMING_SNAKE_CASE : List[Any] = self.convolution(lowerCamelCase_ )
if self.normalization is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.normalization(lowerCamelCase_ )
if self.activation is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.activation(lowerCamelCase_ )
return features
class lowercase__( PreTrainedModel ):
'''simple docstring'''
UpperCamelCase = MobileNetVaConfig
UpperCamelCase = load_tf_weights_in_mobilenet_va
UpperCamelCase = """mobilenet_v1"""
UpperCamelCase = """pixel_values"""
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCamelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCamelCase__ : Tuple = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowerCamelCase__ : Optional[int] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , __A , )
class lowercase__( __A ):
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] = True ) -> Any:
'''simple docstring'''
super().__init__(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = config
SCREAMING_SNAKE_CASE : Union[str, Any] = 32
SCREAMING_SNAKE_CASE : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
SCREAMING_SNAKE_CASE : int = MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=config.num_channels , out_channels=lowerCamelCase_ , kernel_size=3 , stride=2 , )
SCREAMING_SNAKE_CASE : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for i in range(13 ):
SCREAMING_SNAKE_CASE : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=3 , stride=strides[i] , groups=lowerCamelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=1 , ) )
SCREAMING_SNAKE_CASE : Dict = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Optional[int] ) -> Tuple:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str = None , lowerCamelCase_ :Tuple = None , lowerCamelCase_ :Any = None , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_stem(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
SCREAMING_SNAKE_CASE : int = layer_module(lowerCamelCase_ )
if output_hidden_states:
SCREAMING_SNAKE_CASE : str = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : List[Any] = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.flatten(self.pooler(lowerCamelCase_ ) , start_dim=1 )
else:
SCREAMING_SNAKE_CASE : Any = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=lowerCamelCase_ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __A , )
class lowercase__( __A ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = config.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileNetVaModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(lowerCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[Any] = None , lowerCamelCase_ :Dict = None , lowerCamelCase_ :Any = None , lowerCamelCase_ :Dict = None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Any = self.mobilenet_va(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE : Any = self.classifier(self.dropout(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE : Any = 'single_label_classification'
else:
SCREAMING_SNAKE_CASE : Any = 'multi_label_classification'
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE : Tuple = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE : List[str] = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE : Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE : Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE : List[str] = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
SCREAMING_SNAKE_CASE : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , )
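# --- Hedged usage sketch (editor's addition) ----------------------------------
# Assuming the classes above mirror transformers' MobileNetV1 implementation,
# the standard image-classification round trip looks like this. The COCO image
# is an illustrative choice; the checkpoint and expected label come from the
# docstring constants above.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"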
| 703 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
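# --- Hedged usage sketch (editor's addition) ----------------------------------
# Assuming this mirrors transformers' LukeConfig, the config carries a second,
# entity-specific vocabulary next to the word vocabulary; the sizes passed
# below are illustrative.
if __name__ == "__main__":
    from transformers import LukeConfig

    cfg = LukeConfig(entity_vocab_size=10_000, entity_emb_size=128)
    print(cfg.vocab_size, cfg.entity_vocab_size, cfg.entity_emb_size)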
| 18 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowerCamelCase__ : str = logging.get_logger(__name__)
class lowercase__( SegformerImageProcessor ):
    '''simple docstring'''
    def __init__( self :Tuple , *args :Union[str, Any] , **kwargs :Optional[int] ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
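# Editor's note (hedged): instantiating the shim above only emits the
# deprecation warning and then defers entirely to SegformerImageProcessor,
# e.g. (illustrative):
#
#   feature_extractor = SegformerFeatureExtractor()  # warns once, then behaves
#   inputs = feature_extractor(images=image, return_tensors="pt")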
| 704 |
"""simple docstring"""
def search( list_data : list , key : int , left : int = 0 , right : int = 0 )-> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
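    # Hedged examples (editor's addition): direct calls to the function above.
    print(search([1, 2, 4, 8, 16], 8))  # -> 3
    print(search([1, 2, 4, 8, 16], 5))  # -> -1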
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ : Optional[Any] = '''Muhammad Umer Farooq'''
lowerCamelCase__ : List[Any] = '''MIT'''
lowerCamelCase__ : List[str] = '''1.0.0'''
lowerCamelCase__ : Union[str, Any] = '''Muhammad Umer Farooq'''
lowerCamelCase__ : Any = '''contact@muhammadumerfarooq.me'''
lowerCamelCase__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    '''simple docstring'''
    def __init__( self :List[str] , domain :str ) -> None:
        '''simple docstring'''
        super().__init__()
        self.urls : list[str] = []
        self.domain = domain
    def handle_starttag( self :Dict , tag :str , attrs :list[tuple[str, str | None]] ) -> None:
        '''simple docstring'''
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url : str )-> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url ).split('''.''' )[-2:] )
def get_sub_domain_name( url : str )-> str:
    '''simple docstring'''
    return parse.urlparse(url ).netloc
def emails_from_url( url : str = "https://github.com" )-> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 705 |
"""simple docstring"""
def prime_sieve_eratosthenes( num : int )-> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
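# Hedged examples (editor's addition): expected behaviour of the sieve above.
#   prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]
#   prime_sieve_eratosthenes(20) -> [2, 3, 5, 7, 11, 13, 17, 19]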
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 0 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :str = 13 , lowerCamelCase_ :Union[str, Any] = 64 , lowerCamelCase_ :Optional[Any] = 2 , lowerCamelCase_ :Dict = 3 , lowerCamelCase_ :List[Any] = 3 , lowerCamelCase_ :Tuple = True , lowerCamelCase_ :Union[str, Any] = True , lowerCamelCase_ :List[str] = 1_28 , lowerCamelCase_ :str=[16, 32, 64, 1_28] , lowerCamelCase_ :Union[str, Any] = 7 , lowerCamelCase_ :Any = 4 , lowerCamelCase_ :Any = 37 , lowerCamelCase_ :int = "gelu" , lowerCamelCase_ :List[Any] = 0.1 , lowerCamelCase_ :Any = 0.1 , lowerCamelCase_ :List[str] = 10 , lowerCamelCase_ :str = 0.0_2 , lowerCamelCase_ :Any = 2 , lowerCamelCase_ :str = 1 , lowerCamelCase_ :Optional[Any] = 1_28 , lowerCamelCase_ :Dict = [2, 2, 2, 2] , lowerCamelCase_ :Union[str, Any] = 2 , lowerCamelCase_ :Any = 2 , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = encoder_stride
SCREAMING_SNAKE_CASE : str = num_attention_outputs
SCREAMING_SNAKE_CASE : str = embed_dim
SCREAMING_SNAKE_CASE : Dict = embed_dim + 1
SCREAMING_SNAKE_CASE : str = resolution
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : str = dim
SCREAMING_SNAKE_CASE : Any = mlp_expansion_ratio
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFEfficientFormerModel(config=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(a_ , training=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[str] = TFEfficientFormerForImageClassification(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a_ , labels=a_ , training=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = TFEfficientFormerForImageClassification(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFEfficientFormerModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(
self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(a_ )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any ):
SCREAMING_SNAKE_CASE : int = model_class(a_ )
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
SCREAMING_SNAKE_CASE : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : List[Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a_ ) , a_ )
if hasattr(self.model_tester , '''encoder_seq_length''' ):
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE : int = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE : str = outputs.decoder_hidden_states
                self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , a_ )
SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''seq_length''' , a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , a_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(a_ , a_ , a_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowerCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = TFEfficientFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Tuple = getattr(self.model_tester , '''seq_length''' , a_ )
SCREAMING_SNAKE_CASE : List[Any] = getattr(self.model_tester , '''encoder_seq_length''' , a_ )
SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , '''key_length''' , a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(self.model_tester , '''chunk_length''' , a_ )
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
SCREAMING_SNAKE_CASE : Optional[int] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : str = model_class(a_ )
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
SCREAMING_SNAKE_CASE : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Any = model_class(a_ )
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
SCREAMING_SNAKE_CASE : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __lowerCAmelCase ( self :str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE : List[str] = model(a_ )
self.assertTrue(outputs_dict is not None )
def __A ( )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=a_ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : Any = model(**a_ , training=a_ )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE : Any = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=a_ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : int = model(**a_ , training=a_ )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE : List[str] = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
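# --- Editor's sketch (hedged) --------------------------------------------------
# The `_LazyModule` indirection above defers heavy imports until an attribute is
# first touched. The standalone toy below reproduces that idea with plain
# `importlib`; all names in it are illustrative, not part of transformers.
if __name__ == "__main__":
    import importlib

    class _Lazy:
        """Resolve attributes to submodule imports on first access, then cache."""

        def __init__(self, mapping):
            self._mapping = mapping  # attribute name -> module path

        def __getattr__(self, name):
            module = importlib.import_module(self._mapping[name])
            value = getattr(module, name)
            setattr(self, name, value)  # cache so __getattr__ is not hit again
            return value

    lazy = _Lazy({"dumps": "json"})
    print(lazy.dumps({"ok": True}))  # `json` is imported only at this access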
| 18 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ : str = logging.getLogger()
def get_results( output_dir : str )-> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"can't find {path}" )
    return results
lowerCamelCase__ : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowercase__( TestCasePlus ):
'''simple docstring'''
    def __lowerCAmelCase ( self :Any ) -> Any:
        '''simple docstring'''
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys , '''argv''' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_00 )
    def __lowerCAmelCase ( self :int ) -> Any:
        '''simple docstring'''
        import xla_spawn
        testargs = '''
    ./tests/test_trainer_tpu.py
    --num_cores=8
    ./tests/test_trainer_tpu.py
    '''.split()
        with patch.object(sys , '''argv''' , testargs ):
            xla_spawn.main()
| 707 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
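# --- Hedged usage sketch (editor's addition) ----------------------------------
# With this hubconf on the default branch of huggingface/transformers, the entry
# points above are reachable through torch.hub; the checkpoint name below is an
# illustrative choice.
if __name__ == "__main__":
    import torch

    hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
    hub_model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
    print(type(hub_model).__name__, hub_tokenizer.vocab_size)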
| 18 | 0 |
def solution( n : int = 10_00 )-> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 708 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """encodec"""
    def __init__(self , target_bandwidths=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , sampling_rate=2_40_00 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=1_28 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=10_24 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs )
    @property
    def chunk_length(self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride(self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate(self ) -> int:
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers(self ) -> int:
        '''simple docstring'''
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
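# Minimal usage sketch (not part of the original file; the constructor values
# below are assumptions chosen only to illustrate the derived properties):
if __name__ == "__main__":
    config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)
    print(config.chunk_length)   # int(1.0 * 24_000) -> 24_000 samples
    print(config.chunk_stride)   # max(1, int((1.0 - 0.5) * 24_000)) -> 12_000
    print(config.frame_rate)     # ceil(24_000 / prod([8, 5, 4, 2])) -> 75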
| 18 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int ) -> set:
    '''Return the set of distinct prime factors of n, found by trial division.'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len(num: int ) -> int:
    '''Memoized count of distinct prime factors.'''
    return len(unique_prime_factors(num ) )
def equality(iterable: list ) -> bool:
    '''True if every element of the iterable is equal (or it is empty).'''
    return len(set(iterable ) ) in (0, 1)
def run(n: int ) -> list:
    '''Find the first n consecutive integers that each have n distinct prime factors.'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4 ):
    '''Return the first member of the first run of length n.'''
    results = run(n )
    return results[0] if len(results ) else None
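# Worked check (illustrative): 14 and 15 are the first two consecutive integers
# with two distinct prime factors each (14 = 2 * 7, 15 = 3 * 5), so run(2)
# returns [14, 15] and solution(2) == 14.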
if __name__ == "__main__":
print(solution())
| 709 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
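        # With the defaults above (image_size=4, patch_size=2, text_seq_length=7)
        # this gives image_seq_length = (4 // 2) ** 2 + 1 = 5 and seq_length = 12.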
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal: swap coordinates so that x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        '''simple docstring'''
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        '''simple docstring'''
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head(self ):
        '''simple docstring'''
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 18 | 0 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids , attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
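    # Descriptive note: the slice comparison in the method above is a KV-cache
    # consistency check -- decoding one new token with past_key_values must
    # reproduce the final position of a full forward pass over the
    # concatenated sequence.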
    def create_and_check_biogpt_model_past_large_inputs(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ):
        '''simple docstring'''
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['last_hidden_state']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_forward_and_backwards(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False ):
        '''simple docstring'''
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self , config , *args ):
        '''simple docstring'''
        model = BioGptModel(config )
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
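        # Descriptive note: this mirrors the GPT-2-style scaled initialization --
        # residual projection ("c_proj") weights are expected to have their std
        # shrunk by 1/sqrt(2 * num_hidden_layers) so the residual stream's
        # variance stays roughly constant with depth.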
    def create_and_check_biogpt_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_biogpt_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_various_embeddings(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_att_mask_past(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def test_biogpt_gradient_checkpointing(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
    def test_biogpt_model_past_with_large_inputs(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def test_biogpt_weight_initialization(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def test_biogpt_token_classification_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
    @slow
    def test_batch_generation(self ):
        '''simple docstring'''
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        tokenizer.padding_side = 'left'
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
        input_ids = inputs['input_ids'].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model(self ):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label(self ):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_biogpt(self ):
        '''simple docstring'''
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        output = model(input_ids )[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 10.45_57], [-11.04_69, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_biogpt_generation(self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
| 710 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: Optional[str] = field(
        default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
    max_seq_length: int = field(
        default=10_24 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={"""help""": """A csv or a json file containing the training data."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """A csv or a json file containing the validation data."""} )
    test_file: Optional[str] = field(default=None , metadata={"""help""": """A csv or a json file containing the test data."""} )
    def __post_init__(self ):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
        else:
            train_extension = self.train_file.split('''.''' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default=None , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split('''.''' )[-1]
                test_extension = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['''test'''] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset('''csv''' , data_files=data_files , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset('''json''' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['''train'''].features['''label'''].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'''Refused''': 0, '''Entailed''': 1}
    model.config.id2label = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['''statement''']
        tables = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['''label'''] = examples['''label''']
        return result
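    # Note on the assumed `table_text` layout (illustrative example): rows are
    # separated by '\n' and cells by '#', with the first row used as the header,
    # so "col1#col2\na#b" becomes a one-row DataFrame with columns ["col1", "col2"].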
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets['''test''']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('''label''' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='''predict''' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , '''w''' ) as writer:
                logger.info('''***** Predict Results *****''' )
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n" )
    kwargs = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 18 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
lowerCamelCase__ : Union[str, Any] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCamelCase__ : Union[str, Any] = f'''down_blocks.{i}.resnets.{j}.'''
lowerCamelCase__ : Optional[int] = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCamelCase__ : int = f'''down_blocks.{i}.attentions.{j}.'''
lowerCamelCase__ : Optional[Any] = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCamelCase__ : Tuple = f'''up_blocks.{i}.resnets.{j}.'''
lowerCamelCase__ : str = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCamelCase__ : str = f'''up_blocks.{i}.attentions.{j}.'''
lowerCamelCase__ : Any = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCamelCase__ : List[str] = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowerCamelCase__ : List[Any] = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCamelCase__ : Any = f'''up_blocks.{i}.upsamplers.0.'''
lowerCamelCase__ : List[str] = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCamelCase__ : Union[str, Any] = "mid_block.attentions.0."
lowerCamelCase__ : Union[str, Any] = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCamelCase__ : Optional[Any] = f'''mid_block.resnets.{j}.'''
lowerCamelCase__ : Optional[int] = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
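# Illustration only (the helper name is ours, and it assumes the conversion tables above
# are bound to the names used by the appends): an HF Diffusers key is renamed by plain
# substring substitution, first the resnet sub-layer parts, then the block prefixes:
#   "down_blocks.0.resnets.1.conv1.weight"
#   -> "down_blocks.0.resnets.1.in_layers.2.weight"
#   -> "input_blocks.2.0.in_layers.2.weight"
def _demo_unet_key_rename(key="down_blocks.0.resnets.1.conv1.weight"):
    for sd_part, hf_part in unet_conversion_map_resnet:
        key = key.replace(hf_part, sd_part)
    for sd_part, hf_part in unet_conversion_map_layer:
        key = key.replace(hf_part, sd_part)
    return key  # "input_blocks.2.0.in_layers.2.weight"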
def __A ( unet_state_dict : Dict )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
SCREAMING_SNAKE_CASE : int = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
                SCREAMING_SNAKE_CASE : Optional[Any] = v.replace(hf_part , sd_part )
SCREAMING_SNAKE_CASE : Any = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
            SCREAMING_SNAKE_CASE : Optional[Any] = v.replace(hf_part , sd_part )
SCREAMING_SNAKE_CASE : Tuple = v
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCamelCase__ : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCamelCase__ : Any = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowerCamelCase__ : Optional[Any] = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCamelCase__ : List[Any] = f'''down_blocks.{i}.downsamplers.0.'''
lowerCamelCase__ : str = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCamelCase__ : int = f'''up_blocks.{i}.upsamplers.0.'''
lowerCamelCase__ : Optional[Any] = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCamelCase__ : Any = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowerCamelCase__ : Tuple = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCamelCase__ : str = f'''mid_block.resnets.{i}.'''
lowerCamelCase__ : Union[str, Any] = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCamelCase__ : List[Any] = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def __A ( w : Optional[Any] )-> List[str]:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
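# For illustration: SD checkpoints store the VAE attention projections as 1x1
# convolutions, so a 2-D linear weight of shape (C_out, C_in) gains two trailing
# singleton dims, e.g. (512, 512) -> (512, 512, 1, 1).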
def __A ( vae_state_dict : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
            SCREAMING_SNAKE_CASE : str = v.replace(hf_part , sd_part )
SCREAMING_SNAKE_CASE : Any = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
                SCREAMING_SNAKE_CASE : Dict = v.replace(hf_part , sd_part )
SCREAMING_SNAKE_CASE : Tuple = v
SCREAMING_SNAKE_CASE : Dict = {v: vae_state_dict[k] for k, v in mapping.items()}
SCREAMING_SNAKE_CASE : Dict = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"mid.attn_1.{weight_name}.weight" in k:
print(F"Reshaping {k} for SD format" )
                SCREAMING_SNAKE_CASE : int = reshape_weight_for_sd(v )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCamelCase__ : Tuple = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
lowerCamelCase__ : int = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCamelCase__ : List[str] = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCamelCase__ : Dict = {"q": 0, "k": 1, "v": 2}
def __A ( text_enc_dict : Optional[int] )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
SCREAMING_SNAKE_CASE : Dict = k[: -len('''.q_proj.weight''' )]
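            # k[-len("q_proj.weight")] picks the single character 'q', 'k' or 'v'
            # that identifies which projection this tensor belongs to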
SCREAMING_SNAKE_CASE : List[Any] = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
SCREAMING_SNAKE_CASE : List[str] = [None, None, None]
SCREAMING_SNAKE_CASE : int = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
SCREAMING_SNAKE_CASE : Optional[Any] = k[: -len('''.q_proj.bias''' )]
SCREAMING_SNAKE_CASE : Dict = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
SCREAMING_SNAKE_CASE : List[str] = [None, None, None]
SCREAMING_SNAKE_CASE : Optional[int] = v
continue
        SCREAMING_SNAKE_CASE : Any = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
SCREAMING_SNAKE_CASE : List[str] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        SCREAMING_SNAKE_CASE : Dict = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        SCREAMING_SNAKE_CASE : Optional[Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        SCREAMING_SNAKE_CASE : Dict = torch.cat(tensors )
return new_state_dict
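# Minimal sketch with synthetic tensors (the helper name is ours): the SD CLIP
# checkpoint keeps one fused in_proj per attention block, i.e. the separate q/k/v
# projections concatenated along dim 0 in that fixed order, which is what the
# function above rebuilds.
def _demo_fused_qkv():
    q, k, v = torch.zeros(4, 4), torch.ones(4, 4), torch.full((4, 4), 2.0)
    return torch.cat([q, k, v])  # shape (12, 4): rows 0-3 from q, 4-7 from k, 8-11 from v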
def __A ( text_enc_dict : Optional[Any] )-> Tuple:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
lowerCamelCase__ : Tuple = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCamelCase__ : Dict = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
lowerCamelCase__ : List[Any] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
lowerCamelCase__ : Optional[Any] = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load each model from safetensors if the file exists, otherwise fall back to the PyTorch weights
if osp.exists(unet_path):
lowerCamelCase__ : Optional[Any] = load_file(unet_path, device="cpu")
else:
lowerCamelCase__ : Any = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
lowerCamelCase__ : Optional[int] = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
lowerCamelCase__ : Tuple = load_file(vae_path, device="cpu")
else:
lowerCamelCase__ : Dict = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
lowerCamelCase__ : str = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
lowerCamelCase__ : int = load_file(text_enc_path, device="cpu")
else:
lowerCamelCase__ : Dict = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
lowerCamelCase__ : int = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
lowerCamelCase__ : Optional[Any] = convert_unet_state_dict(unet_state_dict)
lowerCamelCase__ : List[Any] = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCamelCase__ : str = convert_vae_state_dict(vae_state_dict)
lowerCamelCase__ : Optional[Any] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCamelCase__ : Tuple = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCamelCase__ : Optional[int] = {"transformer." + k: v for k, v in text_enc_dict.items()}
lowerCamelCase__ : int = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCamelCase__ : List[Any] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
lowerCamelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCamelCase__ : Optional[int] = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCamelCase__ : List[str] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCamelCase__ : Tuple = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCamelCase__ : Optional[int] = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
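    # Example invocation (hypothetical script name and paths):
    #   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half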
| 711 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
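        # with the tester defaults (image 32, patch 2, depths [1, 2, 1], embed_dim 16):
        # (32 // 2) ** 2 = 256 patches, merged twice -> 256 // 4**2 = 16 tokens of dim 16 * 2**2 = 64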
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
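            # attention is computed per local window, so each attention map covers
            # window_size**2 query tokens by window_size**2 key tokens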
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
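        # e.g. (32 // 2) * (32 // 2) = 256 patches with the tester defaults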
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( lowercase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = BloomTokenizerFast
UpperCamelCase = BloomTokenizerFast
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = """tokenizer_file"""
UpperCamelCase = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : str = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
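        # setUp materializes the real bigscience tokenizer files into tmpdirname so that
        # get_rust_tokenizer() below can load them from disk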
def __lowerCAmelCase ( self :List[Any] , **lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
SCREAMING_SNAKE_CASE : Any = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_encode_plus(lowerCamelCase_ )["""input_ids"""]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any]=6 ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
SCREAMING_SNAKE_CASE : Optional[int] = """This is a simple input"""
SCREAMING_SNAKE_CASE : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE : Dict = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE : List[str] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = None # Hotfixing padding = None
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Any = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(iter(lowerCamelCase_ ) )["""premise"""] # pick up one data
SCREAMING_SNAKE_CASE : Optional[Any] = list(sample_data.values() )
SCREAMING_SNAKE_CASE : Union[str, Any] = list(map(tokenizer.encode , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) for x in output_tokens]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 712 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
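                # both shapes follow the standard KV-cache layout:
                # (batch, num_heads, sequence_length, hidden_size // num_heads)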
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
                # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
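        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX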
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
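# Usage sketch (the class name below is hypothetical; the config above would typically be
# registered under a name like BlenderbotSmallOnnxConfig, and generate_dummy_inputs is the
# upstream OnnxConfig entry point):
#   onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)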
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase__:
'''simple docstring'''
UpperCamelCase = BlenderbotConfig
UpperCamelCase = {}
UpperCamelCase = 'gelu'
def __init__( self :List[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str=13 , lowerCamelCase_ :Any=7 , lowerCamelCase_ :Any=True , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=99 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[Any]=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Any=20 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :int=1 , lowerCamelCase_ :List[str]=0 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : int = bos_token_id
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE : int = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFBlenderbotModel(config=UpperCamelCase_ ).get_decoder()
SCREAMING_SNAKE_CASE : Dict = inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : int = inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE : List[Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
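        # with use_cache=True the decoder returns both the hidden states and the KV cache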
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : Dict = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def __A ( a_ : List[str] , a_ : str , a_ : str , a_ : int=None , a_ : List[Any]=None , a_ : Union[str, Any]=None , a_ : Optional[Any]=None , a_ : Tuple=None , )-> Tuple:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
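        # the default decoder mask always keeps the start token visible, then masks padding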
if head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFBlenderbotModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_tokenizers
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ['My friends are cool but they eat too many carbs.']
UpperCamelCase = 'facebook/blenderbot-400M-distill'
@cached_property
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.generate(
model_inputs.input_ids , )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 713 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
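    # Dummy-input generation for ONNX export: OCR is switched off on the image
    # processor so the supplied placeholder words and boxes are used as-is, and
    # dynamic (-1) batch/sequence axes are pinned to small fixed sizes.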
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
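# Circular convolution of two discrete-time signals via the circulant-matrix
# method: each row of the matrix holds the second signal rotated one step
# further, and multiplying by the first signal yields the convolved output.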
class lowercase__:
'''simple docstring'''
    def __init__( self ) -> None:
        '''simple docstring'''
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def __lowerCAmelCase ( self ) -> list:
        '''simple docstring'''
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            # row i of the matrix is the second signal rotated by i positions
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 714 |
"""simple docstring"""
import math
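# Jump search over a sorted array: probe ahead in blocks of size floor(sqrt(n)),
# then scan linearly inside the block that may contain the target, giving
# O(sqrt(n)) comparisons overall.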
def jump_search( arr : list , x : int )-> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f'''Number {x} is at index {res}''')
| 18 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCamelCase__ : List[str] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowerCamelCase__ : Optional[Any] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
lowerCamelCase__ : List[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
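    # sacrebleu expects the references transposed relative to the per-prediction
    # layout used here (one list per reference position), so they are reshaped
    # below before scoring.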
    def __lowerCAmelCase ( self , predictions , references , char_order :int = CHRF.CHAR_ORDER , word_order :int = CHRF.WORD_ORDER , beta :int = CHRF.BETA , lowercase :bool = False , whitespace :bool = False , eps_smoothing :bool = False , ) -> dict:
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 715 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
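    # sklearn's f1_score takes (y_true, y_pred), so references are passed first.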
    def __lowerCAmelCase ( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ) -> dict:
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 18 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class lowercase__:
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = question_encoder
SCREAMING_SNAKE_CASE : str = generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.question_encoder
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
if os.path.isfile(lowerCAmelCase_ ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCAmelCase_ , '''question_encoder_tokenizer''' )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowerCAmelCase_ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(lowerCAmelCase_ )
self.generator.save_pretrained(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls :Any , lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''config''' , lowerCAmelCase_ )
if config is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = RagConfig.from_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(
lowerCAmelCase_ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=lowerCAmelCase_ , generator=lowerCAmelCase_ )
def __call__( self :Optional[Any] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self :Dict , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.generator.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.generator.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.question_encoder
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.generator
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any = None , lowerCamelCase_ :List[Any] = None , lowerCamelCase_ :Dict = None , lowerCamelCase_ :List[str] = "longest" , lowerCamelCase_ :Union[str, Any] = None , lowerCamelCase_ :int = True , **lowerCamelCase_ :Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , lowerCAmelCase_ , )
if max_length is None:
SCREAMING_SNAKE_CASE : int = self.current_tokenizer.model_max_length
SCREAMING_SNAKE_CASE : Tuple = self(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.current_tokenizer.model_max_length
SCREAMING_SNAKE_CASE : List[str] = self(
text_target=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE : Dict = labels['''input_ids''']
return model_inputs
| 716 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
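# Digit-cancelling fractions: two-digit fractions such as 49/98 where naively
# crossing out the shared digit (49/98 -> 4/8) happens to preserve the value.
# solution() multiplies them together and returns the denominator of the
# product in lowest terms.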
def is_digit_cancelling( num : int , den : int )-> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len : int )-> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( n : int = 2 )-> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
| 18 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
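    # PipelineTool hooks: encode() preprocesses the input image, forward() runs
    # generation on the model, and decode() turns the generated ids back into a
    # caption string.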
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq
    inputs = ['image']
    outputs = ['text']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" ):
        '''simple docstring'''
        return self.pre_processor(images=image , return_tensors='''pt''' )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase__:
'''simple docstring'''
UpperCamelCase = BlenderbotSmallConfig
UpperCamelCase = {}
UpperCamelCase = """gelu"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :str=99 , lowerCamelCase_ :List[Any]=32 , lowerCamelCase_ :str=2 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :Optional[int]=37 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]=20 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Any=0 , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : Dict = bos_token_id
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
SCREAMING_SNAKE_CASE : List[str] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : Optional[int] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : Tuple = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE : int = 1
# first forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[int] = model(snake_case__ , attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def __A ( a_ : Tuple , a_ : Tuple , a_ : Optional[int] , a_ : Optional[int]=None , a_ : str=None , a_ : List[str]=None , a_ : str=None , a_ : Tuple=None , )-> Union[str, Any]:
'''simple docstring'''
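    # default masks: ignore pad tokens in the encoder, always attend to the
    # decoder start token, and leave every attention head unmasked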
if attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=snake_case__ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
    src_text = [
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i'm going to throw up.\nand why is that?"""
    ]
    model_name = """facebook/blenderbot_small-90M"""
    @cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 718 |
"""simple docstring"""
import math
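# Floyd-Warshall all-pairs shortest paths: dp[i][j] is relaxed through every
# intermediate vertex k, taking O(n^3) time and O(n^2) space.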
class Graph:
    '''simple docstring'''
    def __init__( self , n=0 ) -> None:  # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ) -> None:
        '''simple docstring'''
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        '''simple docstring'''
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ) -> int:
        '''simple docstring'''
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 18 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase__( __lowerCamelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_attention_heads''' ) )
class lowercase__:
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int=13 , lowerCamelCase_ :str=64 , lowerCamelCase_ :Union[str, Any]=3 , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :str=2 , lowerCamelCase_ :Optional[int]=1 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=[1_28, 2_56, 3_84] , lowerCamelCase_ :Optional[int]=[4, 6, 8] , lowerCamelCase_ :Tuple=[2, 3, 4] , lowerCamelCase_ :Tuple=[16, 16, 16] , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :List[Any]=[2, 2, 2] , lowerCamelCase_ :Optional[Any]=[2, 2, 2] , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Tuple=2 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = kernel_size
SCREAMING_SNAKE_CASE : int = stride
SCREAMING_SNAKE_CASE : Union[str, Any] = padding
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : Any = key_dim
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = attention_ratio
SCREAMING_SNAKE_CASE : int = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : str = initializer_range
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LevitModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[Any] = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = image_size[0], image_size[1]
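        # the LeViT patch embedding stacks four stride-2 convolutions, each of
        # which roughly halves the spatial resolution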
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : str = LevitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = LevitModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __lowerCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str ):
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : int = outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : str = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=False ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE_ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Any = model_class(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : List[str] = problem_type['''title''']
SCREAMING_SNAKE_CASE : Union[str, Any] = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : str = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE : List[Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as warning_list:
SCREAMING_SNAKE_CASE : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : int = self.default_image_processor
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
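# A standalone sketch of the warning-interception pattern used in the
# problem-type test above: `warnings.catch_warnings(record=True)` collects
# warnings instead of printing them, so a test can fail loudly when PyTorch
# reports a target/input size mismatch. The `dummy_*` names are illustrative,
# not part of the test suite.
import warnings
import torch
import torch.nn.functional as F
dummy_logits = torch.randn(4 , 1 )
dummy_labels = torch.randn(4 ) # deliberately missing the trailing dimension
with warnings.catch_warnings(record=True ) as caught:
    warnings.simplefilter('''always''' )
    F.mse_loss(dummy_logits , dummy_labels )
assert any('''target size''' in str(w.message ) for w in caught )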
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
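# For intuition, a minimal sketch of the lazy-import idea behind `_LazyModule`:
# PEP 562 lets a module defer heavy submodule imports until an attribute is
# first accessed. Illustrative only; `heavy_module`/`HeavyClass` are
# hypothetical names and the real transformers implementation differs.
import importlib
_lazy_imports = {"HeavyClass": "heavy_module"}
def __getattr__(name ):
    if name in _lazy_imports:
        module = importlib.import_module("." + _lazy_imports[name] , __name__ )
        return getattr(module , name )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}" )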
| 18 | 0 |
"""simple docstring"""
def solution(n: int = 1_00 )-> int:
    '''simple docstring'''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
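    # Quick brute-force cross-check of the two closed forms used in `solution`:
    # 1**3 + ... + n**3 == (n * (n + 1) // 2) ** 2 (Nicomachus's identity) and
    # 1**2 + ... + n**2 == n * (n + 1) * (2 * n + 1) // 6.
    for check_n in range(1 , 50 ):
        assert sum(i**3 for i in range(1 , check_n + 1 ) ) == (check_n * (check_n + 1) // 2) ** 2
        assert sum(i**2 for i in range(1 , check_n + 1 ) ) == check_n * (check_n + 1) * (2 * check_n + 1) // 6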
| 720 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
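# Illustrative sketch (separate from the class above) of the directory probing
# `from_pretrained` performs: ControlNets are loaded from `<dir>`, `<dir>_1`,
# `<dir>_2`, ... until a directory is missing. The helper name is hypothetical.
def _candidate_controlnet_dirs(base: str )-> list:
    dirs = []
    idx = 0
    path = base
    while os.path.isdir(path ):
        dirs.append(path )
        idx += 1
        path = base + f"_{idx}"
    return dirs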
| 18 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCamelCase__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Dict = 256
class lowercase__( lowercase__ ):
'''simple docstring'''
UpperCamelCase = ["""melgan"""]
def __init__( self :Tuple , lowerCamelCase_ :SpectrogramNotesEncoder , lowerCamelCase_ :SpectrogramContEncoder , lowerCamelCase_ :TaFilmDecoder , lowerCamelCase_ :DDPMScheduler , lowerCamelCase_ :OnnxRuntimeModel if is_onnx_available() else Any , ) -> List[str]:
'''simple docstring'''
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE : List[str] = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE : Any = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE : str = 1_28
self.register_modules(
notes_encoder=UpperCAmelCase__ , continuous_encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , melgan=UpperCAmelCase__ , )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any]=(-1.0, 1.0) , lowerCamelCase_ :List[Any]=False ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = output_range
if clip:
SCREAMING_SNAKE_CASE : Dict = torch.clip(UpperCAmelCase__ , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=(-1.0, 1.0) , lowerCamelCase_ :Optional[int]=False ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = input_range
SCREAMING_SNAKE_CASE : List[Any] = torch.clip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = input_tokens > 0
SCREAMING_SNAKE_CASE : Optional[Any] = self.notes_encoder(
encoder_input_tokens=UpperCAmelCase__ , encoder_inputs_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.continuous_encoder(
encoder_inputs=UpperCAmelCase__ , encoder_inputs_mask=UpperCAmelCase__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = noise_time
if not torch.is_tensor(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Dict = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCAmelCase__ ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : Any = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE : Optional[Any] = self.decoder(
encodings_and_masks=UpperCAmelCase__ , decoder_input_tokens=UpperCAmelCase__ , decoder_noise_time=UpperCAmelCase__ )
return logits
@torch.no_grad()
def __call__( self :Dict , lowerCamelCase_ :List[List[int]] , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :bool = True , lowerCamelCase_ :str = "numpy" , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , ) -> List[str]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(UpperCAmelCase__ )}." )
        SCREAMING_SNAKE_CASE : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros([1, 0, self.n_dims] , np.float32 )
SCREAMING_SNAKE_CASE : Any = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase__ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCAmelCase__ ):
if i == 0:
SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE : int = ones
SCREAMING_SNAKE_CASE : List[str] = self.scale_features(
UpperCAmelCase__ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCAmelCase__ , continuous_mask=UpperCAmelCase__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE : str = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : str = self.decode(
encodings_and_masks=UpperCAmelCase__ , input_tokens=UpperCAmelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scale_to_features(UpperCAmelCase__ , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE : str = mel[:1]
SCREAMING_SNAKE_CASE : List[Any] = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ )
logger.info('''Generated segment''' , UpperCAmelCase__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
            SCREAMING_SNAKE_CASE : Dict = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
SCREAMING_SNAKE_CASE : Tuple = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase__ )
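# A standalone sketch of the linear rescaling implemented by `scale_features`
# and `scale_to_features` above: optionally clip to [min_value, max_value],
# map to [0, 1], then map onto the requested range; the inverse direction just
# swaps the two ranges.
def _rescale(x: torch.Tensor , src: tuple , dst: tuple )-> torch.Tensor:
    lo, hi = src
    out_lo, out_hi = dst
    zero_one = (torch.clip(x , lo , hi ) - lo) / (hi - lo)
    return zero_one * (out_hi - out_lo) + out_lo
# e.g. the forward direction used in `scale_features` with output_range=[-1, 1]:
# _rescale(features, (math.log(1e-5), 4.0), (-1.0, 1.0))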
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float , angle: float , radian_mode: bool = False )-> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 )-> bool:
    '''simple docstring'''
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
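    # One more worked instance: two equal and opposite horizontal forces applied
    # at the same point cancel, so the system is in static equilibrium.
    forces = array([polar_force(10.0, 0), polar_force(10.0, 180)])
    location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)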
| 18 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowercase__:
def __init__( self :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :int=13 , lowerCamelCase_ :int=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :str=3 , lowerCamelCase_ :Any=16 , lowerCamelCase_ :Tuple=[1, 2, 1] , lowerCamelCase_ :Dict=[2, 2, 4] , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :List[str]=2.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Optional[Any]="gelu" , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :List[str]=1E-5 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :int=True , lowerCamelCase_ :Dict=10 , lowerCamelCase_ :List[Any]=8 , lowerCamelCase_ :Any=["stage1", "stage2", "stage3"] , lowerCamelCase_ :str=[1, 2, 3] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : Dict = embed_dim
SCREAMING_SNAKE_CASE : Any = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE : Tuple = window_size
SCREAMING_SNAKE_CASE : Optional[int] = mlp_ratio
SCREAMING_SNAKE_CASE : Tuple = qkv_bias
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Tuple = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = encoder_stride
SCREAMING_SNAKE_CASE : Any = out_features
SCREAMING_SNAKE_CASE : Optional[int] = out_indices
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MaskFormerSwinModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerSwinBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = ['''stem''']
SCREAMING_SNAKE_CASE : Dict = MaskFormerSwinBackbone(config=_lowerCamelCase )
def __lowerCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = MaskFormerSwinModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __lowerCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
def check_equivalence(lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has"
f" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}."
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
@require_torch
class lowercase__( unittest.TestCase , __UpperCAmelCase ):
UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase = MaskFormerSwinConfig
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MaskFormerSwinModelTester(self )
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
SCREAMING_SNAKE_CASE : Optional[int] = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE : str = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE : int = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
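# For intuition, the shape bookkeeping behind `create_and_check_model` above as
# plain arithmetic, using the tester defaults (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]): each Swin stage merges 2x2 patches,
# quartering the sequence length and doubling the hidden size.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2
expected_seq_len = num_patches // (4 ** (len(depths ) - 1))
expected_dim = embed_dim * 2 ** (len(depths ) - 1)
assert (expected_seq_len, expected_dim) == (16, 64)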
| 700 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-intensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
# Just some logs to know what the algorithms is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
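    # A quick standalone illustration of the single-point crossover used above,
    # independent of the population machinery:
    parent_a, parent_b = "HELLO", "WORLD"
    cut = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:cut] + parent_b[cut:]
    child_b = parent_b[:cut] + parent_a[cut:]
    assert len(child_a ) == len(child_b ) == len(parent_a )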
| 18 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray )-> np.ndarray:
    '''simple docstring'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray )-> np.ndarray:
    '''simple docstring'''
    return (gray > 1_27) & (gray <= 2_55)
def dilation(image: np.ndarray , kernel: np.ndarray )-> np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image into the padded canvas (offset recovered from the original
    # TheAlgorithms implementation; treated as an assumption here)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
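    # A tiny self-contained check of `dilation`: a single foreground pixel grows
    # into a plus shape under the cross-shaped structuring element.
    tiny = np.zeros((5, 5))
    tiny[2, 2] = 1
    dilated = dilation(tiny, structuring_element)
    assert dilated[1, 2] == dilated[3, 2] == dilated[2, 1] == dilated[2, 3] == 1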
| 701 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn , prediction , ground_truths ):
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
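    # A minimal sketch of the max-over-references scoring in
    # `metric_max_over_ground_truths`, with a toy metric standing in for
    # `exact_match_score` from utils_rag:
    def _toy_exact_match(pred: str , gt: str )-> float:
        return float(pred.strip().lower() == gt.strip().lower() )
    assert metric_max_over_ground_truths(_toy_exact_match , '''Paris''' , ['''paris''', '''Lyon'''] ) == 1.0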
| 18 | 0 |
"""simple docstring"""
from math import sqrt
def __A ( a_ : Any )-> bool:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
SCREAMING_SNAKE_CASE : int = True
    # 0 and 1 are not primes.
if number <= 1:
SCREAMING_SNAKE_CASE : Any = False
for divisor in range(2 , int(round(sqrt(UpperCamelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
SCREAMING_SNAKE_CASE : int = False
break
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'status' must been from type bool"
return status
def __A ( a_ : str )-> Optional[int]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
SCREAMING_SNAKE_CASE : Any = list(range(2 , n + 1 ) )
    SCREAMING_SNAKE_CASE : List[Any] = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(UpperCamelCase__ ) ):
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
SCREAMING_SNAKE_CASE : Tuple = 0
# filters actual prime numbers.
SCREAMING_SNAKE_CASE : Optional[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def __A ( a_ : Dict )-> List[str]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
SCREAMING_SNAKE_CASE : Any = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(UpperCamelCase__ ):
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def __A ( a_ : List[str] )-> Optional[int]:
'''simple docstring'''
    assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number >= 0, "'number' must be an int and >= 0"
    SCREAMING_SNAKE_CASE : Any = [] # this list will be returned by the function.
# potential prime number factors.
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = number
if number == 0 or number == 1:
ans.append(UpperCamelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(UpperCamelCase__ ):
while quotient != 1:
if is_prime(UpperCamelCase__ ) and (quotient % factor == 0):
ans.append(UpperCamelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def __A ( a_ : Tuple )-> Optional[int]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE : Any = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE : Tuple = prime_factorization(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = max(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def __A ( a_ : Tuple )-> Union[str, Any]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE : Optional[int] = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE : int = prime_factorization(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = min(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def __A ( a_ : Optional[int] )-> List[str]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , UpperCamelCase__ ), "compare bust been from type bool"
return number % 2 == 0
def __A ( a_ : Any )-> str:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , UpperCamelCase__ ), "compare bust been from type bool"
return number % 2 != 0
def __A ( a_ : List[Any] )-> int:
'''simple docstring'''
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (number > 2) and is_even(UpperCamelCase__ )
), "'number' must been an int, even and > 2"
SCREAMING_SNAKE_CASE : Any = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
SCREAMING_SNAKE_CASE : List[str] = get_prime_numbers(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = len(UpperCamelCase__ )
# run variable for while-loops.
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Dict = None
# exit variable. for break up the loops
SCREAMING_SNAKE_CASE : Optional[int] = True
while i < len_pn and loop:
SCREAMING_SNAKE_CASE : Optional[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
SCREAMING_SNAKE_CASE : str = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (len(UpperCamelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
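# For intuition, a self-contained brute-force version of the Goldbach
# decomposition above, independent of the helpers in this module:
def _goldbach_pairs(number: int )-> list:
    def _is_prime_simple(k: int )-> bool:
        return k > 1 and all(k % d for d in range(2 , int(k**0.5 ) + 1 ) )
    return [
        (p, number - p)
        for p in range(2 , number // 2 + 1 )
        if _is_prime_simple(p ) and _is_prime_simple(number - p )
    ]
assert (5, 23) in _goldbach_pairs(28 )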
def __A ( a_ : List[str] , a_ : Optional[int] )-> Tuple:
'''simple docstring'''
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    SCREAMING_SNAKE_CASE : Any = 0
    while numberb != 0:
        SCREAMING_SNAKE_CASE : Dict = numbera % numberb  # Euclidean step: rest = a mod b
        SCREAMING_SNAKE_CASE : Optional[Any] = numberb
        SCREAMING_SNAKE_CASE : int = rest
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
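# The obfuscation above collapsed both gcd arguments into one name; this is a
# de-obfuscated sketch of the intended Euclidean algorithm (hypothetical names,
# not part of the original file).
def gcd_sketch(a: int, b: int) -> int:
    assert isinstance(a, int) and isinstance(b, int) and a >= 0 and b >= 0
    while b != 0:
        a, b = b, a % b  # replace (a, b) by (b, a mod b) until b reaches 0
    return a

assert gcd_sketch(48, 36) == 12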
def __A ( a_ : Any , a_ : Tuple )-> Any:
'''simple docstring'''
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 1)
        and (numberb >= 1)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE : Optional[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
    if numbera > 1 and numberb > 1:
# builds the prime factorization of 'number1' and 'number2'
SCREAMING_SNAKE_CASE : Union[str, Any] = prime_factorization(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = prime_factorization(UpperCamelCase__ )
    elif numbera == 1 or numberb == 1:
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : List[Any] = max(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
    SCREAMING_SNAKE_CASE : Union[str, Any] = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
            if n in prime_fac_b:
SCREAMING_SNAKE_CASE : Tuple = prime_fac_a.count(UpperCamelCase__ )
                SCREAMING_SNAKE_CASE : int = prime_fac_b.count(UpperCamelCase__ )
for _ in range(max(UpperCamelCase__ , UpperCamelCase__ ) ):
ans *= n
else:
SCREAMING_SNAKE_CASE : int = prime_fac_a.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# iterates through primeFac2
    for n in prime_fac_b:
if n not in done:
            SCREAMING_SNAKE_CASE : Union[str, Any] = prime_fac_b.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
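# The prime-factorization bookkeeping above computes lcm(a, b); a compact
# equivalent sketch via gcd (hypothetical name, not part of the original file):
import math

def lcm_sketch(a: int, b: int) -> int:
    assert isinstance(a, int) and isinstance(b, int) and a >= 1 and b >= 1
    return a * b // math.gcd(a, b)  # since lcm(a, b) * gcd(a, b) == a * b

assert lcm_sketch(12, 18) == 36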
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'number' must been a positive int"
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Dict = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime,
        # advance to the next prime number.
while not is_prime(UpperCamelCase__ ):
ans += 1
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and is_prime(
UpperCamelCase__ ), "'ans' must been a prime number and from type int"
return ans
def __A ( a_ : List[str] , a_ : List[str] )-> Any:
'''simple docstring'''
assert (
        is_prime(UpperCamelCase__ ) and is_prime(UpperCamelCase__ ) and (p_number_a < p_number_b)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
SCREAMING_SNAKE_CASE : List[str] = p_number_a + 1 # jump to the next number
    SCREAMING_SNAKE_CASE : Union[str, Any] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
    while number < p_number_b:
ans.append(UpperCamelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and ans[0] != p_number_a
        and ans[len(UpperCamelCase__ ) - 1] != p_number_b
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def __A ( a_ : List[str] )-> Dict:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 1), "'n' must been int and >= 1"
SCREAMING_SNAKE_CASE : int = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(UpperCamelCase__ )
# precondition
    assert ans[0] == 1 and ans[len(UpperCamelCase__ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def __A ( a_ : str )-> Any:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
SCREAMING_SNAKE_CASE : Union[str, Any] = get_divisors(UpperCamelCase__ )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (divisors[0] == 1)
and (divisors[len(UpperCamelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
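# A one-liner sketch of the perfect-number test above (hypothetical name, not
# part of the original file): a number equals the sum of its proper divisors.
def is_perfect_sketch(number: int) -> bool:
    assert isinstance(number, int) and number > 1
    return sum(d for d in range(1, number) if number % d == 0) == number

assert is_perfect_sketch(28) and not is_perfect_sketch(27)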
def __A ( a_ : List[str] , a_ : Dict )-> int:
'''simple docstring'''
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
SCREAMING_SNAKE_CASE : Union[str, Any] = gcd(abs(UpperCamelCase__ ) , abs(UpperCamelCase__ ) )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __A ( a_ : Dict )-> int:
'''simple docstring'''
    assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
    SCREAMING_SNAKE_CASE : str = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __A ( a_ : str )-> str:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
    SCREAMING_SNAKE_CASE : Union[str, Any] = 1 # this will be returned
for _ in range(n - 1 ):
SCREAMING_SNAKE_CASE : Tuple = ans
ans += fiba
SCREAMING_SNAKE_CASE : List[Any] = tmp
return ans
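# A de-obfuscated sketch of the iterative Fibonacci above (hypothetical names,
# not part of the original file); note it is zero-indexed, so its indexing
# convention may differ from the version above by one.
def fib_sketch(n: int) -> int:
    assert isinstance(n, int) and n >= 0
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous

assert [fib_sketch(i) for i in range(7)] == [0, 1, 1, 2, 3, 5, 8]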
| 702 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
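# Hedged usage sketch for the character-level tokenizer above. Assumption: it
# mirrors the MGP-STR tokenizer from transformers, so loading the referenced
# checkpoint should produce one token id per character; API details may differ.
#
# from transformers import MgpstrTokenizer
# tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
# ids = tokenizer("hello")["input_ids"]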
| 18 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __A ( a_ : Dict , a_ : int , a_ : int=None , a_ : str=None )-> Optional[int]:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE : str = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowercase__:
'''simple docstring'''
UpperCamelCase = OPTConfig
UpperCamelCase = {}
UpperCamelCase = """gelu"""
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any]=13 , lowerCamelCase_ :int=7 , lowerCamelCase_ :Any=True , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :str=99 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Optional[Any]="gelu" , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Tuple=20 , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Tuple=16 , lowerCamelCase_ :Optional[Any]=16 , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = embed_dim
SCREAMING_SNAKE_CASE : int = word_embed_proj_dim
SCREAMING_SNAKE_CASE : List[Any] = False
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : List[Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFOPTModel(config=lowercase_ )
SCREAMING_SNAKE_CASE : List[Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : Any = input_ids[:1, :]
SCREAMING_SNAKE_CASE : int = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : List[Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Dict = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Dict = model(lowercase_ , attention_mask=lowercase_ )[0]
SCREAMING_SNAKE_CASE : str = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : int = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 )
@require_tf
class lowercase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = 10
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFOPTModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowercase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase_ :str , lowerCamelCase_ :List[Any] ):
if hasattr(lowercase_ , '''weight''' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they do not exist,
                # then retry fetching the attribute once built.
model.build()
if hasattr(lowercase_ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
SCREAMING_SNAKE_CASE : List[str] = model_class(config=lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE : Any = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE : Tuple = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
SCREAMING_SNAKE_CASE : Any = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE : Optional[int] = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
SCREAMING_SNAKE_CASE : List[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE : Any = False
self.assertTrue(lowercase_ )
def __A ( a_ : Any )-> Optional[Any]:
'''simple docstring'''
return tf.constant(__SCREAMING_SNAKE_CASE , dtype=tf.intaa )
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 99
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
SCREAMING_SNAKE_CASE : Tuple = input_ids.shape[0]
SCREAMING_SNAKE_CASE : List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
SCREAMING_SNAKE_CASE : List[Any] = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE : int = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
SCREAMING_SNAKE_CASE : Optional[Any] = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 11, 5_12)
self.assertEqual(output.shape , lowercase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-3 ) )
SCREAMING_SNAKE_CASE : Optional[int] = tf.function(lowercase_ , jit_compile=lowercase_ )
SCREAMING_SNAKE_CASE : Dict = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-2 ) )
@require_tf
@slow
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Tuple = "facebook/opt-350m"
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFOPTForCausalLM.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE : str = GPTaTokenizer.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE : Optional[Any] = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowercase_ , return_tensors='''tf''' , padding=lowercase_ , add_special_tokens=lowercase_ )
SCREAMING_SNAKE_CASE : List[Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
SCREAMING_SNAKE_CASE : Optional[int] = tf.function(lowercase_ , jit_compile=lowercase_ )
SCREAMING_SNAKE_CASE : List[str] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
@require_tf
@slow
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "facebook/opt-125m"
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : int = GPTaTokenizer.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE : str = tokenizer(lowercase_ , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = model.generate(lowercase_ , max_length=10 )
SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = "facebook/opt-350m"
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = "left"
# use different length sentences to test batching
SCREAMING_SNAKE_CASE : Tuple = [
"Hello, my dog is a little",
"Today, I",
]
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowercase_ , return_tensors='''tf''' , padding=lowercase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = inputs["input_ids"]
SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(input_ids=lowercase_ , attention_mask=inputs['''attention_mask'''] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowercase_ )
SCREAMING_SNAKE_CASE : int = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
SCREAMING_SNAKE_CASE : Dict = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = "facebook/opt-350m"
SCREAMING_SNAKE_CASE : Optional[Any] = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE : int = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowercase_ , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : List[str] = model.generate(lowercase_ , max_length=10 )
SCREAMING_SNAKE_CASE : Dict = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 703 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
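# Hedged usage sketch: the config above follows the standard PretrainedConfig
# workflow, so it can be constructed with overrides and round-tripped to disk
# (the LukeConfig name and path below are assumptions for illustration).
#
# config = LukeConfig(entity_vocab_size=10_000)
# config.save_pretrained("./luke-config")                # writes config.json
# reloaded = LukeConfig.from_pretrained("./luke-config")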
| 18 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def __A ( a_ : Optional[Any] , a_ : Tuple , a_ : List[Any] )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Path(a_ )
SCREAMING_SNAKE_CASE : Optional[int] = Path(a_ )
dest_dir.mkdir(exist_ok=a_ )
for path in src_dir.iterdir():
SCREAMING_SNAKE_CASE : Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n]
SCREAMING_SNAKE_CASE : str = dest_dir.joinpath(path.name )
print(a_ )
dest_path.open('''w''' ).write('''\n'''.join(a_ ) )
if __name__ == "__main__":
fire.Fire(minify)
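# Hedged CLI sketch: fire exposes the function above as a command, so the
# script can be invoked as below to keep the first n lines of every file in
# src_dir (paths and the script name are illustrative, not from the original).
#
# python minify.py ./data/full ./data/mini 100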
| 704 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
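# A hedged usage sketch of the two-ended recursive search above (the original
# function name is obfuscated; 'search' matches its own recursive call). Note
# the `right or len(...) - 1` default: an explicit right=0 is falsy and
# silently becomes len - 1, so callers should rely on the default.
#
# search([1, 3, 5, 7, 9], 7)   # -> 3
# search([1, 3, 5, 7, 9], 4)   # -> -1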
| 18 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__( __lowercase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : List[str] = process
SCREAMING_SNAKE_CASE : List[Any] = params
def __len__( self :str ) -> List[str]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :Tuple , lowerCamelCase_ :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.dataset[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process(lowerCamelCase_ , **self.params )
return processed
class lowercase__( __lowercase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict=None ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = loader
SCREAMING_SNAKE_CASE : Optional[int] = infer
SCREAMING_SNAKE_CASE : Any = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
def __len__( self :int ) -> str:
'''simple docstring'''
return len(self.loader )
def __iter__( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = iter(self.loader )
return self
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Tuple = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : List[str] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase_ , lowerCamelCase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : Optional[Any] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : str = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Tuple = self._loader_batch_data.__class__(lowerCamelCase_ )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Any = next(self.iterator )
SCREAMING_SNAKE_CASE : Optional[int] = self.infer(lowerCamelCase_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase_ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[Any] = processed
else:
SCREAMING_SNAKE_CASE : Tuple = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Any = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : int = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : Tuple = processed
SCREAMING_SNAKE_CASE : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__( __lowercase ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict=None ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __iter__( self :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = iter(self.loader )
SCREAMING_SNAKE_CASE : Optional[int] = None
return self
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : List[str] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Union[str, Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : List[Any] = next(self.subiterator )
return processed
class lowercase__( __lowercase ):
'''simple docstring'''
def __iter__( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = iter(self.loader )
return self
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : List[Any] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Optional[Any] = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Union[str, Any] = item.pop('''is_last''' )
accumulator.append(lowerCamelCase_ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase_ , torch.Tensor ):
SCREAMING_SNAKE_CASE : str = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : Tuple = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : Optional[int] = observed_batch_size
SCREAMING_SNAKE_CASE : Tuple = processed
SCREAMING_SNAKE_CASE : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : List[str] = self.loader_batch_item()
SCREAMING_SNAKE_CASE : str = item.pop('''is_last''' )
accumulator.append(lowerCamelCase_ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : Optional[int] = processed
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(lowerCamelCase_ )
return accumulator
class lowercase__( __lowercase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :Dataset , lowerCamelCase_ :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = dataset
SCREAMING_SNAKE_CASE : Union[str, Any] = key
def __len__( self :Optional[int] ) -> Dict:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :Dict , lowerCamelCase_ :int ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__( __lowercase ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :Dataset , lowerCamelCase_ :str , lowerCamelCase_ :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = dataset
SCREAMING_SNAKE_CASE : Optional[int] = keya
SCREAMING_SNAKE_CASE : Dict = keya
def __len__( self :Tuple ) -> Dict:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :str , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 705 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
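# Expected behaviour sketch for the sieve above (values checkable by hand):
# prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]
# prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]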
| 18 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 0
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Optional[int] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[int] = CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Tuple = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : str = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Optional[Any] = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = CustomImageProcessor.from_pretrained(__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = True
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __A ( )-> List[str]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(a_ ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def __A ( )-> str:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def __A ( )-> int:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(a_ ):
http_head('''https://huggingface.co''' )
| 707 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
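# Hedged usage sketch: in the upstream transformers repo this file serves as
# the torch.hub entry point, so the wrappers above are reachable through
# torch.hub.load; the entry-point name below is an assumption, since the
# function names are obfuscated here.
#
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")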
| 18 | 0 |
import string
def __A ( a_ : str )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ''
for i in sequence:
SCREAMING_SNAKE_CASE : Dict = ord(lowerCAmelCase__ )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def __A ( a_ : str )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = string.ascii_letters
SCREAMING_SNAKE_CASE : Any = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
def __A ( )-> List[str]:
'''simple docstring'''
from timeit import timeit
print('''Running performance benchmarks...''' )
SCREAMING_SNAKE_CASE : Any = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(F"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowerCAmelCase__ )} seconds" )
print(F"> atbash(): {timeit('atbash(printable)' , setup=lowerCAmelCase__ )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 708 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    # Parameter names are recovered from the attribute assignments below; the
    # obfuscated original reused a single name for every argument.
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
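# --- Added example: how the derived properties above interact for the default
# 24 kHz settings. A sketch only — this module is normally imported through
# transformers, so the snippet is left as a comment.
#
# config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)
# config.chunk_length     # int(1.0 * 24000) = 24000 samples per chunk
# config.chunk_stride     # max(1, int((1 - 0.5) * 24000)) = 12000
# config.frame_rate       # ceil(24000 / prod([8, 5, 4, 2])) = ceil(24000 / 320) = 75
# config.num_quantizers   # int(1000 * 24.0 // (75 * 10)) = 32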
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Any = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
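# --- Added example (illustrative, commented out): the essence of the lazy-import
# pattern above in a few lines, using PEP 562 module-level __getattr__. The
# `_LAZY_ATTRS` name is invented for this sketch; `_LazyModule` generalizes the
# same idea by replacing the module object in `sys.modules`, so submodules are
# only imported the first time one of their attributes is accessed.
#
# import importlib
#
# _LAZY_ATTRS = {"OPTModel": ".modeling_opt", "OPTConfig": ".configuration_opt"}
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")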
| 709 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds small random configs and inputs for the LayoutLMv3 model tests."""

    # Parameter names are recovered from the attribute assignments below; the
    # obfuscated original reused a single name for every argument.
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x1 >= x0 and y1 >= y0)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Flag names below are inferred from the upstream LayoutLMv3 test suite.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    # Signature inferred from the PipelineTesterMixin hook this overrides.
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the hidden states
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
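# --- Added example: a hedged sketch (not from the original tests) of the bbox
# convention the tests above rely on: one [x0, y0, x1, y1] box per text token,
# coordinates normalized to LayoutLMv3's 0-1000 range, with x1 >= x0, y1 >= y0.
def _toy_bbox(num_tokens: int):
    boxes = torch.randint(0, 1000, (1, num_tokens, 4))
    boxes[..., 2:] = torch.maximum(boxes[..., 2:], boxes[..., :2])  # enforce legality
    return boxes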
| 18 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> id dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
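# --- Added example (commented so importing this module stays side-effect free):
# `load_vocab` on a tiny inline vocabulary, one token per line, ids assigned in
# line order.
#
# import tempfile
# with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
#     f.write("[PAD]\n[CLS]\nhello\n")
# print(load_vocab(f.name))  # OrderedDict([('[PAD]', 0), ('[CLS]', 1), ('hello', 2)])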
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
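# --- Added example: a commented sketch of how the fairseq offset above shifts
# raw SentencePiece ids. It assumes a real `prophetnet.tokenizer` model file is
# available, so it is left commented out. Per the alignment table in __init__,
# "," is id 3 in the spm vocab and id 15 (= 3 + fairseq_offset) in the model vocab.
#
# tok = XLMProphetNetTokenizer("prophetnet.tokenizer")
# print(tok.sp_model.PieceToId(","))    # 3 (raw spm id)
# print(tok._convert_token_to_id(","))  # 15 (3 + fairseq_offset of 12)
# print(tok._convert_id_to_token(15))   # ","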
| 710 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Field names are recovered from their uses further down; defaults marked in the
    obfuscated source as placeholders are restored from the upstream tabfact script.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # "#"-separated columns, newline-separated rows, first row is the header
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
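# --- Added example (commented, standalone): the `table_text` format consumed by
# `_convert_table_text_to_pandas` above — "#"-separated columns, newline-separated
# rows, first row is the header. The table content here is invented for the sketch.
#
# import pandas as pd
# table_text = "round#clubs remaining\nfirst round#156\nsecond round#78"
# content = [row.split("#") for row in table_text.strip("\n").split("\n")]
# df = pd.DataFrame.from_records(content[1:], columns=content[0])
# print(df)  # two rows with columns "round" and "clubs remaining"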
| 18 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowerCamelCase__ : Dict = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowerCamelCase__ : Optional[int] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
lowerCamelCase__ : Tuple = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    """1 if the normalized prediction matches the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage of predictions that exactly match any of their references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def __A ( a_ : List[Any] , a_ : int , a_ : str="exp" , a_ : Dict=None , a_ : Tuple=False , a_ : Optional[Any]=False , a_ : Dict=False , )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = len(references[0] )
if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
SCREAMING_SNAKE_CASE : str = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )]
SCREAMING_SNAKE_CASE : List[Any] = sacrebleu.corpus_bleu(
__lowerCAmelCase , __lowerCAmelCase , smooth_method=__lowerCAmelCase , smooth_value=__lowerCAmelCase , force=__lowerCAmelCase , lowercase=__lowerCAmelCase , use_effective_order=__lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
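# A minimal usage sketch for the combined metric above. It mirrors the upstream
# `wiki_split` metric in the `datasets` library; the metric id and the exact
# call below are assumptions, not verified here.
#
#     import datasets
#     metric = datasets.load_metric("wiki_split")
#     result = metric.compute(
#         sources=["About 95 species are currently accepted ."],
#         predictions=["About 95 you now get in ."],
#         references=[["About 95 species are currently known ."]],
#     )
#     result  # -> {"sari": ..., "sacrebleu": ..., "exact": ...}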
| 711 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
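# Worked example with the tester defaults above (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]): the patch grid starts at (32 // 2) ** 2 = 256
# tokens, each of the len(depths) - 1 = 2 merge stages divides that by 4, so
# expected_seq_len = 256 // 16 = 16, while the hidden size doubles per stage
# to expected_dim = 16 * 2 ** 2 = 64.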
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
class lowercase__:
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[list[Edge]] = [[] for _ in range(_UpperCAmelCase )]
SCREAMING_SNAKE_CASE : Dict = size
def __getitem__( self :str , lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self._size
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_UpperCAmelCase , _UpperCAmelCase ) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = deque([start_vertex] )
SCREAMING_SNAKE_CASE : list[int | None] = [None] * self.size
SCREAMING_SNAKE_CASE : int = 0
while queue:
SCREAMING_SNAKE_CASE : Any = queue.popleft()
SCREAMING_SNAKE_CASE : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE : List[str] = current_distance + edge.weight
SCREAMING_SNAKE_CASE : str = distances[edge.destination_vertex]
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE : str = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
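# Usage sketch (hypothetical names, since the identifiers above are mangled;
# `Graph` stands in for the adjacency-list class, whose methods are, in order,
# __init__(size), edge iteration, the size property, add_edge(from_vertex,
# to_vertex, weight) and get_shortest_path(start_vertex, finish_vertex)):
#
#     g = Graph(3)
#     g.add_edge(0, 1, 0)  # weight-0 edges are explored first (appendleft)
#     g.add_edge(1, 2, 1)
#     g.get_shortest_path(0, 2)  # -> 1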
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
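# Shape note for the zero tensors appended above: each past_key_values entry is
# a (batch, num_attention_heads, past_sequence_length,
# hidden_size // num_attention_heads) block, one key/value pair per layer,
# padded out with 2-tuples for whichever of encoder/decoder has extra layers.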
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
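# Worked example for the helper above: called with batch_size=-1 and
# seq_length=-1, compute_effective_axis_dimension falls back to the ONNX fixed
# defaults (batch 2; sequence 8 minus the tokenizer's special tokens), so the
# dummy batch holds two identical strings of the unk token repeated
# seq_length times.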
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 0 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __A ( a_ : Optional[int]=None , a_ : Optional[Any]=None )-> int:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCAmelCase__ )
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
metadata={"""help""": """The csv file to plot."""} , )
UpperCamelCase = field(
default=_a , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
UpperCamelCase = field(
default=_a , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
UpperCamelCase = field(
default=_a , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
UpperCamelCase = field(
default=_a , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
UpperCamelCase = field(
default=_a , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
UpperCamelCase = list_field(
default=_a , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def __A ( a_ : Tuple )-> int:
'''simple docstring'''
try:
int(UpperCAmelCase__ )
return True
except ValueError:
return False
def __A ( a_ : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
try:
float(UpperCAmelCase__ )
return True
except ValueError:
return False
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = args
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
SCREAMING_SNAKE_CASE : Tuple = csv.DictReader(_A )
for row in reader:
SCREAMING_SNAKE_CASE : Union[str, Any] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
SCREAMING_SNAKE_CASE : Tuple = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
SCREAMING_SNAKE_CASE : int = float(row['''result'''] )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = plt.subplots()
SCREAMING_SNAKE_CASE : List[str] = '''Time usage''' if self.args.is_time else '''Memory usage'''
SCREAMING_SNAKE_CASE : Dict = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
SCREAMING_SNAKE_CASE : int = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
SCREAMING_SNAKE_CASE : str = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
SCREAMING_SNAKE_CASE : Dict = self.result_dict[model_name]['''result''']
(x_axis_array, inner_loop_array) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
SCREAMING_SNAKE_CASE : List[str] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_A , )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
(x_axis_label, inner_loop_label) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
SCREAMING_SNAKE_CASE : Tuple = np.asarray(_A , _A )[: len(_A )]
plt.scatter(
_A , _A , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(_A , _A , '''--''' )
title_str += f" {label_model_name} vs."
SCREAMING_SNAKE_CASE : Optional[int] = title_str[:-4]
SCREAMING_SNAKE_CASE : Dict = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_A )
plt.xlabel(_A )
plt.ylabel(_A )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __A ( )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()[0]
SCREAMING_SNAKE_CASE : Optional[Any] = Plot(args=UpperCAmelCase__ )
plot.plot()
if __name__ == "__main__":
main()
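# Example invocation sketch (the flag names are derived from the dataclass
# fields above via HfArgumentParser and should be treated as assumptions):
#
#     python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
#     python plot_csv_file.py --csv_file results.csv --plot_along_batch --no_log_scale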
| 713 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
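# Usage sketch for the dummy-input generation above (upstream naming assumed
# here: the config class LayoutLMv3OnnxConfig and a LayoutLMv3 processor):
#
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)
#     sorted(dummy)  # -> ["attention_mask", "bbox", "input_ids", "pixel_values"]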
| 18 | 0 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ : List[str] = get_tests_dir("fixtures/dummy-config.json")
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(__UpperCamelCase , '''fake-roberta''' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertEqual(type(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoConfig.register('''model''' , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoConfig.register('''bert''' , __UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __lowerCAmelCase ( self :int ) -> str:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """new-model"""
try:
AutoConfig.register('''new-model''' , __UpperCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 714 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
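# Worked trace: for arr = [0, 1, 2, 3, 4, 10, 15, 22] and x = 15, the block
# size is int(sqrt(8)) = 2, so the jump phase probes arr[1], arr[3], arr[5]
# and arr[7]; arr[7] = 22 >= 15 stops the jumps with prev = 6, the linear
# scan sees arr[6] == 15 at once, and the function returns index 6.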
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
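# With torch and transformers installed, the guarded imports above resolve to
# the real pipeline; otherwise ShapEPipeline is the dummy object that raises a
# helpful error on first use. Minimal sketch (the checkpoint id below is an
# assumption):
#
#     from diffusers import ShapEPipeline
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e")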
| 715 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # `f1_score` is assumed to be imported earlier in this file via
        # `from sklearn.metrics import f1_score`.
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCamelCase__ : Any = """src/diffusers"""
# Matches is_xxx_available()
lowerCamelCase__ : str = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCamelCase__ : Dict = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCamelCase__ : Union[str, Any] = """
{0} = None
"""
lowerCamelCase__ : Union[str, Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
lowerCamelCase__ : Tuple = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the main __init__ and extract the objects that belong to each backend."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally `overwrite` them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
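

# Illustrative sketch (not part of the original script): how the three templates
# above render. The object names below are hypothetical examples, not read from
# any real __init__.
def _demo_dummy_rendering():
    backend_name = '["torch"]'
    print(create_dummy_object("DUMMY_CONSTANT_EXAMPLE", backend_name))  # all-upper hits DUMMY_CONSTANT
    print(create_dummy_object("load_example_pipeline", backend_name))  # all-lower hits DUMMY_FUNCTION
    print(create_dummy_object("ExampleModel", backend_name))  # mixed case hits DUMMY_CLASS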
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_base: int = 2) -> int:
    """Multiply all digit-cancelling fractions and return the denominator of the
    product given in its lowest common terms."""
    result = 1.0
    for fraction in fraction_list(max_base):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
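

# Worked example (not part of the original file): 49/98 is the classic
# digit-cancelling fraction. Striking the shared digit 9 leaves 4/8, and
# 4/8 == 49/98, which is exactly what the predicate above checks.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(12, 34)  # digits don't line up, so no cancellation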
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    # return real roots as plain floats, complex roots as-is
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
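

# Sanity check (not part of the original file): by Vieta's formulas the roots of
# 5x^2 + 6x + 1 = 0 must satisfy r1 + r2 == -b/a == -1.2 and r1 * r2 == c/a == 0.2;
# the factorisation (5x + 1)(x + 1) gives r1 = -0.2 and r2 = -1.0.
_r1, _r2 = quadratic_roots(a=5, b=6, c=1)
assert abs(_r1 + _r2 + 6 / 5) < 1e-9
assert abs(_r1 * _r2 - 1 / 5) < 1e-9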
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
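

# Quick illustration (not part of the original file): with the defaults above the
# channel width doubles after each of the len(depths) - 1 downsampling stages, so
# hidden_size == int(96 * 2 ** 3) == 768 and the backbone exposes five stage names.
_cfg = MaskFormerSwinConfig()
assert _cfg.hidden_size == 768
assert _cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]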
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent, tanh(x) = (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
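

# Equivalence check (not part of the original file): the formula above is an
# algebraic rewrite of tanh(x) = (e^x - e^-x) / (e^x + e^-x), so it must agree
# with numpy's built-in implementation.
_xs = np.linspace(-5.0, 5.0, 11)
assert np.allclose(tangent_hyperbolic(_xs), np.tanh(_xs))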
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow each node k in turn as an intermediate hop
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
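
    # Expected results for the demo above (not part of the original file):
    # show_min(1, 4) follows 1 -> 3 (cost 5) -> 4 (cost 6), total 11, and
    # show_min(0, 3) follows 0 -> 2 (cost 9) -> 3 (cost 7), total 16.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16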
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase__( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , )
SCREAMING_SNAKE_CASE : Any = floats_tensor(control_image.shape , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowercase__( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowerCamelCase_ :Tuple ):
if isinstance(lowerCamelCase_ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Optional[int] = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int]=0 ) -> Dict:
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = 2
SCREAMING_SNAKE_CASE : List[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , ),
]
SCREAMING_SNAKE_CASE : Tuple = floats_tensor(control_image[0].shape , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : str = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCamelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCamelCase_ , controlnet=lowerCamelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = '''evil space-punk bird'''
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
lowerCamelCase_ , lowerCamelCase_ , control_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (5_12, 5_12, 3)
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
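

# Sketch (not part of the original tests) of what `control_guidance_start` /
# `control_guidance_end` mean in the pipeline under test: for every denoising
# step, each controlnet gets a keep factor of 1.0 while the step falls inside its
# [start, end] window and 0.0 outside, mirroring the schedule that diffusers'
# ControlNet pipelines compute internally.
def _controlnet_keep_schedule(num_steps, starts, ends):
    return [
        [
            1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
            for s, e in zip(starts, ends)
        ]
        for i in range(num_steps)
    ]


# e.g. with 10 steps, the first controlnet is active for steps 0-4 and the
# second for steps 5-9:
assert _controlnet_keep_schedule(10, [0.0, 0.5], [0.5, 1.0])[0] == [1.0, 0.0]
assert _controlnet_keep_schedule(10, [0.0, 0.5], [0.5, 1.0])[9] == [0.0, 1.0]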
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
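
# Illustration (not part of the original file) of the lazy-import pattern above:
# nothing under `modeling_mctct` is imported until an attribute is first touched,
# so importing the package stays cheap and torch-only symbols fail only on use.
# A minimal standalone analogue of the idea:
#
#     import importlib, types
#
#     class LazyNamespace(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each attribute to the submodule it lives in
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(module, attr)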
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
def __init__( self :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :int=7 , lowerCamelCase_ :List[Any]=4_00 , lowerCamelCase_ :str=20_00 , lowerCamelCase_ :List[Any]=24 , lowerCamelCase_ :Tuple=24 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :int=1_60_00 , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Union[str, Any]=True , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Dict = min_seq_length
SCREAMING_SNAKE_CASE : str = max_seq_length
SCREAMING_SNAKE_CASE : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Tuple = feature_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
SCREAMING_SNAKE_CASE : List[str] = padding_value
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : Any = return_attention_mask
SCREAMING_SNAKE_CASE : int = do_normalize
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Dict=False , lowerCamelCase_ :List[str]=False ) -> Optional[Any]:
'''simple docstring'''
def _flatten(lowerCamelCase_ :Dict ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = SpeechaTextFeatureExtractionTester(self )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Tuple ) -> int:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE : str = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : str = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : str = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = inputs.input_features
SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask
SCREAMING_SNAKE_CASE : List[str] = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Dict = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : Optional[int] = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = feature_extractor(
lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inputs.input_features
SCREAMING_SNAKE_CASE : Any = inputs.attention_mask
SCREAMING_SNAKE_CASE : List[Any] = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(
lowerCamelCase_ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = inputs.input_features
SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask
SCREAMING_SNAKE_CASE : Tuple = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = inputs.input_features
SCREAMING_SNAKE_CASE : Any = inputs.attention_mask
SCREAMING_SNAKE_CASE : Dict = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
SCREAMING_SNAKE_CASE : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Dict = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=16 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = inputs.input_features
SCREAMING_SNAKE_CASE : Dict = inputs.attention_mask
SCREAMING_SNAKE_CASE : List[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.rand(1_00 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
from datasets import load_dataset
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : List[str] = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE : str = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
SCREAMING_SNAKE_CASE : Dict = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
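

# Sketch (not part of the original tests) of the per-utterance normalisation that
# `_check_zero_mean_unit_variance` asserts above: each feature dimension is
# standardised over the un-padded frames.
def _zero_mean_unit_var_reference(features, eps=1e-7):
    # features: (num_frames, feature_size) numpy array
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + eps)


_feats = np.random.rand(100, 24).astype(np.float32)
_normed = _zero_mean_unit_var_reference(_feats)
assert np.allclose(_normed.mean(axis=0), 0.0, atol=1e-3)
assert np.allclose(_normed.var(axis=0), 1.0, atol=1e-2)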
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""Wrapper that runs several `ControlNetModel`s on the same sample and sums their residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True,
                        save_function: Callable = None, safe_serialization: bool = False,
                        variant: Optional[str] = None):
        # each controlnet is saved under save_directory, save_directory_1, ...
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(model_path_to_save, is_main_process=is_main_process,
                                       save_function=save_function, safe_serialization=safe_serialization,
                                       variant=variant)

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
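

# Usage sketch (not part of the original file), assuming two real controlnet
# checkpoints are available:
#
#     nets = MultiControlNetModel(
#         [
#             ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#             ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#         ]
#     )
#     nets.save_pretrained("./multi")                     # writes ./multi and ./multi_1
#     nets = MultiControlNetModel.from_pretrained("./multi")
#
# The forward pass then expects one conditioning image and one scale per net.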
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into its x/y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check that the net moment (sum of r x F over all forces) is approximately zero."""
    # moment of each force about the origin; in 2D the cross product is a scalar
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
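
    # Worked check (not part of the original file) for the beam problem above:
    # with forces along y applied at x positions 0, 6, 10 and 12, the net moment
    # about the origin is 0*(-2000) + 6*(-1200) + 10*15600 + 12*(-12400) = 0.
    assert 6 * -1200 + 10 * 15600 + 12 * -12400 == 0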
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ : List[str] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
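

# Plain-numpy sketch (not part of the original tests) of what `shift_tokens_right`
# does: the decoder input is the target sequence shifted one position to the
# right, with `decoder_start_token_id` placed in front, and label positions masked
# with -100 turned back into padding. The shift test later in this file asserts
# exactly this behaviour (first column == 2, one fewer trailing pad token).
def _shift_tokens_right_reference(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    shifted = np.where(shifted == -100, pad_token_id, shifted)
    return shifted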
class FlaxBlenderbotSmallModelTester:
def __init__( self :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :int=True , lowerCamelCase_ :int=False , lowerCamelCase_ :List[Any]=99 , lowerCamelCase_ :Optional[int]=16 , lowerCamelCase_ :List[Any]=2 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :Any=0.0_2 , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE : Dict = bos_token_id
SCREAMING_SNAKE_CASE : Dict = initializer_range
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(lowerCamelCase_ , 1 , 2 )
SCREAMING_SNAKE_CASE : Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = prepare_blenderbot_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 20
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
SCREAMING_SNAKE_CASE : Any = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE : Dict = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = model.decode(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 20
SCREAMING_SNAKE_CASE : Tuple = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.encode(inputs_dict['''input_ids'''] )
SCREAMING_SNAKE_CASE : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE : Dict = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = model.decode(lowerCamelCase_ , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._get_config_and_data()
SCREAMING_SNAKE_CASE : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = lm_model(input_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE : int = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        SCREAMING_SNAKE_CASE : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE : List[str] = lm_model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(lowerCamelCase_ , 1 , 2 )
        SCREAMING_SNAKE_CASE : List[str] = np.equal(lowerCamelCase_ , 1 ).astype(np.float32 ).sum()
        SCREAMING_SNAKE_CASE : Dict = np.equal(lowerCamelCase_ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowercase__( _UpperCAmelCase , unittest.TestCase , _UpperCAmelCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FlaxBlenderbotSmallModelTester(self )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any]=None , **lowerCamelCase_ :Union[str, Any] ):
return model.encode(input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Tuple = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
SCREAMING_SNAKE_CASE : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ):
return model.decode(
decoder_input_ids=lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , encoder_outputs=lowerCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : str = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : List[Any] = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE : str = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 700 |
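A note on the pattern exercised by the encode/decode subtests above: each jitted call is compared against the same call under jax.disable_jit(). Below is a minimal, self-contained sketch of that comparison on a toy function; only jax.jit and jax.disable_jit are assumed, and the function f is purely illustrative.

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    # Any pure function works here; tanh keeps the numbers tame.
    return jnp.tanh(x) * 2.0

x = jnp.arange(4.0)
jitted = f(x)            # compiled execution path
with jax.disable_jit():  # same Python code, run op by op
    eager = f(x)
assert jitted.shape == eager.shape
print(bool(jnp.allclose(jitted, eager)))  # True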
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
# Just some logs to know what the algorithms is doing.
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 0 |
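To make the genetic-algorithm plumbing above concrete, here is a minimal sketch of the fitness and crossover steps with explicit names; the strings and the seed are illustrative choices, not from the source.

import random

def evaluate(item: str, target: str) -> tuple[str, float]:
    # Fitness = number of positions where the candidate matches the target.
    score = len([g for position, g in enumerate(item) if g == target[position]])
    return (item, float(score))

def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    # Single random cut point; the two children swap tails.
    random_slice = random.randint(0, len(parent_a) - 1)
    return (
        parent_a[:random_slice] + parent_b[random_slice:],
        parent_b[:random_slice] + parent_a[random_slice:],
    )

random.seed(0)
print(evaluate("HELLO", "HELLO"))   # ('HELLO', 5.0)
print(crossover("HELLO", "OLLEH"))  # two recombined strings of length 5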
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__:
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :int=13 , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Any=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Any=99 , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :List[Any]=32 , lowerCamelCase_ :Tuple=5 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :Tuple=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :List[Any]=2 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Tuple="last" , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Any=0 , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Dict = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_lengths
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Any = gelu_activation
SCREAMING_SNAKE_CASE : int = sinusoidal_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = causal
SCREAMING_SNAKE_CASE : str = asm
SCREAMING_SNAKE_CASE : Dict = n_langs
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Dict = n_special
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[str] = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : Tuple = summary_type
SCREAMING_SNAKE_CASE : List[Any] = use_proj
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = XLMModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , langs=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = XLMWithLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = XLMForQuestionAnsweringSimple(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = XLMForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , )
        SCREAMING_SNAKE_CASE : List[Any] = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = XLMForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = XLMForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : List[Any] = XLMForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Any=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
SCREAMING_SNAKE_CASE : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = XLMModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 )
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str=False , lowerCamelCase_ :str=1 ) -> Union[str, Any]:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
SCREAMING_SNAKE_CASE : Any = min_length + idx + 1
SCREAMING_SNAKE_CASE : Any = min_length + idx + 1
SCREAMING_SNAKE_CASE : int = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Dict=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
SCREAMING_SNAKE_CASE : List[str] = min_length + idx + 1
SCREAMING_SNAKE_CASE : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , )
pass
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = XLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCamelCase_ ) # the president
SCREAMING_SNAKE_CASE : int = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
SCREAMING_SNAKE_CASE : Optional[int] = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
| 701 |
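The XLM tester above builds its inputs from ids_tensor and random_attention_mask, which live in transformers' test_modeling_common. A simplified stand-in with the same contract (shapes and dtypes only) is sketched below; it assumes nothing beyond torch, and the sizes mirror the tester's defaults.

import torch

def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size); same contract as the test helper.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

batch_size, seq_length = 13, 7
input_ids = ids_tensor([batch_size, seq_length], vocab_size=99)
attention_mask = (torch.rand(batch_size, seq_length) > 0.5).long()
print(input_ids.shape, attention_mask.shape)  # torch.Size([13, 7]) twice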
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
        a_ , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 0 |
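The scoring helper near the top of the evaluation script takes the best metric value over all gold answers for a prediction. A toy version of that pattern, with a simple exact-match metric standing in for the utils_rag implementations:

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # A prediction gets full credit if it matches *any* acceptable answer.
    return max(metric_fn(prediction, gt) for gt in ground_truths)

print(metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]))  # 1.0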
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def __A ( a_ : float )-> str:
'''simple docstring'''
assert type(a_ ) in (int, float) and decimal == int(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(a_ )
SCREAMING_SNAKE_CASE : Tuple = ''''''
SCREAMING_SNAKE_CASE : str = False
if decimal < 0:
SCREAMING_SNAKE_CASE : Optional[Any] = True
decimal *= -1
while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
SCREAMING_SNAKE_CASE : Tuple = values[remainder] + hexadecimal
SCREAMING_SNAKE_CASE : Any = '''0x''' + hexadecimal
if negative:
SCREAMING_SNAKE_CASE : Any = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
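The conversion above repeatedly divides by 16 and prepends the hex digit of each remainder. A cleaned-up sketch with descriptive names (and an explicit zero case added, which the original leaves implicit):

def to_hex(decimal: int) -> str:
    digits = "0123456789abcdef"
    negative = decimal < 0
    decimal = abs(int(decimal))
    out = ""
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        out = digits[remainder] + out  # prepend, so digits land in order
    out = "0x" + (out or "0")
    return "-" + out if negative else out

print(to_hex(255))              # 0xff
print(to_hex(-42))              # -0x2a
print(to_hex(255) == hex(255))  # True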
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 0 |
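The tokenizer above is character-level: tokenization is just list(text), and ids come from a vocab.json lookup with "[GO]" doubling as the unknown token. A tiny stand-in of that lookup; the four-entry vocab is illustrative, not the real mgp-str vocabulary.

vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
decoder = {v: k for k, v in vocab.items()}

def tokenize(text: str) -> list[str]:
    return list(text)  # one token per character

def convert_tokens_to_ids(tokens: list[str]) -> list[int]:
    return [vocab.get(t, vocab["[GO]"]) for t in tokens]

ids = convert_tokens_to_ids(tokenize("abc"))
print(ids)                        # [1, 2, 3]
print([decoder[i] for i in ids])  # ['a', 'b', 'c']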
from __future__ import annotations
def __A ( a_ : int | float | str , a_ : int | float | str )-> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE : Dict = int(a_ )
SCREAMING_SNAKE_CASE : Dict = int(a_ )
SCREAMING_SNAKE_CASE : list[str] = []
for temp in range(int(a_ ) ):
series.append(F"1 / {pow(temp + 1 , int(a_ ) )}" if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : Any = int(input("Enter the last number (nth term) of the P-Series"))
lowerCamelCase__ : Optional[Any] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 703 |
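Rendered numerically, the P-series terms for an assumed nth_term=5 and power=2 look as follows; the partial sums converge (toward pi^2/6 in this case) because p > 1.

nth_term, power = 5, 2
series = [f"1 / {(i + 1) ** power}" if i else "1" for i in range(nth_term)]
print(series)  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
print(sum(1 / (i + 1) ** power for i in range(nth_term)))  # 1.4636...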
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 0 |
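Reading the defaults back off the config is a handy sanity check. LukeConfig ships with transformers, so this snippet only assumes the package is installed; the printed values follow the __init__ signature above.

from transformers import LukeConfig

config = LukeConfig()  # defaults per the signature above: vocab_size=50267, entity_vocab_size=500000, ...
print(config.entity_emb_size, config.use_entity_aware_attention)  # 256 True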
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """donut-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Union[str, Any] , lowerCamelCase_ :int=2_24 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Any=96 , lowerCamelCase_ :Any=[2, 2, 6, 2] , lowerCamelCase_ :str=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :str=4.0 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :int=1E-5 , **lowerCamelCase_ :Any , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = embed_dim
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : Optional[Any] = window_size
SCREAMING_SNAKE_CASE : str = mlp_ratio
SCREAMING_SNAKE_CASE : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Tuple = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
| 704 |
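The last line of the config's __init__ encodes the Swin convention that the channel dimension doubles at every stage, so the final hidden size is embed_dim * 2**(num_stages - 1). With the defaults above:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 96 -> 192 -> 384 -> 768
print(hidden_size)  # 768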
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
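The recursive search above compares both ends of the list against the key and then walks one step inward from each side. A quick demonstration (two_end_search is a descriptive name chosen here, not from the source):

def two_end_search(data: list, key, left: int = 0, right: int = 0) -> int:
    right = right or len(data) - 1
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return two_end_search(data, key, left + 1, right - 1)

print(two_end_search([4, 8, 15, 16, 23, 42], 16))  # 3
print(two_end_search([4, 8, 15], 99))              # -1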
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
UpperCamelCase = """CIDAS/clipseg-rd64-refined"""
UpperCamelCase = """image_segmenter"""
UpperCamelCase = CLIPSegForImageSegmentation
UpperCamelCase = ["""image""", """text"""]
UpperCamelCase = ["""image"""]
def __init__( self :Dict , *lowerCamelCase_ :int , **lowerCamelCase_ :Any ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :"Image" , lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase_ , return_tensors='''pt''' )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Dict ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = self.model(**lowerCamelCase_ ).logits
return logits
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = outputs.cpu().detach().numpy()
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : List[Any] = 1
        return Image.fromarray((array * 2_55).astype(np.uint8 ) )
| 705 |
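The decode step of the tool binarizes the CLIPSeg logits at zero and scales the mask to 8-bit before building a PIL image. A numpy-only sketch of that post-processing, with random logits standing in for real model output:

import numpy as np
from PIL import Image

logits = np.random.randn(64, 64)   # stand-in for the model's logits
mask = np.where(logits > 0, 1, 0)  # threshold at zero, as in decode()
image = Image.fromarray((mask * 255).astype(np.uint8))
print(image.size, image.mode)      # (64, 64) L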
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 0 |
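For reference, the same sieve with descriptive names, run on a small input:

def prime_sieve(num: int) -> list[int]:
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False  # every multiple of p is composite
        p += 1
    return [n for n in range(2, num + 1) if primes[n]]

print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]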
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : str = (32, 32)
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Tuple = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE : int = DDPMScheduler()
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE : Tuple = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCamelCase_ , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE : Any = DDPMScheduler()
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[str] = self.dummy_vae
SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
        # assemble the x4 upscale pipeline: DDPM injects noise into the low-res image, DDIM (v-prediction) runs the denoising loop
SCREAMING_SNAKE_CASE : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE : Dict = DDPMScheduler()
SCREAMING_SNAKE_CASE : str = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : int = self.dummy_vae
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE : List[str] = unet.half()
SCREAMING_SNAKE_CASE : List[Any] = text_encoder.half()
        # assemble the x4 upscale pipeline: DDPM injects noise into the low-res image, DDIM (v-prediction) runs the denoising loop
SCREAMING_SNAKE_CASE : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , ).images
SCREAMING_SNAKE_CASE : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
SCREAMING_SNAKE_CASE : Dict = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Dict = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Dict = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE : Any = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Tuple = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
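        # slicing attention one step at a time and offloading submodules to CPU keeps peak GPU
        # memory low, which the max_memory_allocated assertion below depends on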
SCREAMING_SNAKE_CASE : Optional[int] = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCamelCase = Features({"""text""": Value("""string""" )} )
UpperCamelCase = Features({} )
UpperCamelCase = """text"""
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
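# A minimal usage sketch (hedged: `prepare_for_task` belongs to the older `datasets`
# task-template API and may be deprecated in recent releases):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
#   lm_ds = ds.prepare_for_task("language-modeling")  # keeps only the "text" column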
| 707 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
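# Example consumption through torch.hub (a sketch; assumes this module is exposed as the
# repository's hubconf.py entry point, as in the published huggingface/pytorch-transformers hub):
#
#   import torch
#   tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")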
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ : Optional[Any] = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["ViTFeatureExtractor"]
lowerCamelCase__ : List[Any] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
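        # one latent frame spans prod(upsampling_ratios) audio samples, i.e. the total encoder stride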
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
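        # each residual quantizer contributes log2(codebook_size) = 10 bits per frame, so the
        # largest target bandwidth (kbps) fixes the count: n_q = bw * 1000 / (frame_rate * 10)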
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 18 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ : Dict = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase__ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase__ : Optional[int] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase__ : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :List[str] ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Any] ) -> Any:
'''simple docstring'''
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str]=0.9 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :str=0.5 ) -> int:
'''simple docstring'''
if NLTK_VERSION >= version.Version('''3.6.5''' ):
SCREAMING_SNAKE_CASE : List[Any] = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase_ ) , word_tokenize(lowerCamelCase_ ) , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
else:
SCREAMING_SNAKE_CASE : Dict = [
meteor_score.single_meteor_score(lowerCamelCase_ , lowerCamelCase_ , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
return {"meteor": np.mean(lowerCamelCase_ )}
| 709 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
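        # i.e. enforce x0 <= x1 and y0 <= y1 by swapping any coordinate pair that is out of order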
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
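        # 199 tokens = 2 text tokens + 197 visual tokens (196 patches for a 224x224 image with
        # 16x16 patches, plus one patch-stream CLS token)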
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __A ( a_ : str , a_ : List[Any] )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = checkpoint
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : int = vae_state_dict['''encoder.conv_in.weight''']
SCREAMING_SNAKE_CASE : List[Any] = vae_state_dict['''encoder.conv_in.bias''']
SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict['''encoder.conv_out.weight''']
SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''encoder.conv_out.bias''']
SCREAMING_SNAKE_CASE : int = vae_state_dict['''encoder.norm_out.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = vae_state_dict['''encoder.norm_out.bias''']
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''decoder.conv_in.weight''']
SCREAMING_SNAKE_CASE : Any = vae_state_dict['''decoder.conv_in.bias''']
SCREAMING_SNAKE_CASE : List[Any] = vae_state_dict['''decoder.conv_out.weight''']
SCREAMING_SNAKE_CASE : Dict = vae_state_dict['''decoder.conv_out.bias''']
SCREAMING_SNAKE_CASE : Union[str, Any] = vae_state_dict['''decoder.norm_out.weight''']
SCREAMING_SNAKE_CASE : Dict = vae_state_dict['''decoder.norm_out.bias''']
SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict['''quant_conv.weight''']
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''quant_conv.bias''']
SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict['''post_quant_conv.weight''']
SCREAMING_SNAKE_CASE : str = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
SCREAMING_SNAKE_CASE : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
SCREAMING_SNAKE_CASE : List[Any] = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(a_ )
}
# Retrieves the keys for the decoder up blocks only
SCREAMING_SNAKE_CASE : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(a_ )
}
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Any = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
SCREAMING_SNAKE_CASE : str = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = {'''old''': F"down.{i}.block", '''new''': F"down_blocks.{i}.resnets"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
SCREAMING_SNAKE_CASE : Any = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
SCREAMING_SNAKE_CASE : int = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : str = {'''old''': F"mid.block_{i}", '''new''': F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : Dict = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
SCREAMING_SNAKE_CASE : List[str] = renew_vae_attention_paths(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
conv_attn_to_linear(a_ )
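    # the LDM checkpoint stores attention projections as 1x1 convolutions; diffusers expects
    # linear layers, so the trailing spatial dimensions are squeezed out of those weights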
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = num_up_blocks - 1 - i
SCREAMING_SNAKE_CASE : Union[str, Any] = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
SCREAMING_SNAKE_CASE : Tuple = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
SCREAMING_SNAKE_CASE : List[str] = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : str = {'''old''': F"up.{block_id}.block", '''new''': F"up_blocks.{i}.resnets"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : Dict = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
SCREAMING_SNAKE_CASE : Any = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : List[str] = {'''old''': F"mid.block_{i}", '''new''': F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
SCREAMING_SNAKE_CASE : Dict = renew_vae_attention_paths(a_ )
SCREAMING_SNAKE_CASE : Tuple = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
conv_attn_to_linear(a_ )
return new_checkpoint
def __A ( a_ : str , a_ : str , )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = io.BytesIO(r.content )
SCREAMING_SNAKE_CASE : Union[str, Any] = OmegaConf.load(a_ )
SCREAMING_SNAKE_CASE : List[Any] = 5_12
SCREAMING_SNAKE_CASE : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
SCREAMING_SNAKE_CASE : Dict = {}
with safe_open(a_ , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
SCREAMING_SNAKE_CASE : Any = f.get_tensor(a_ )
else:
SCREAMING_SNAKE_CASE : Any = torch.load(a_ , map_location=a_ )['''state_dict''']
# Convert the VAE model.
SCREAMING_SNAKE_CASE : List[Any] = create_vae_diffusers_config(a_ , image_size=a_ )
SCREAMING_SNAKE_CASE : Any = custom_convert_ldm_vae_checkpoint(a_ , a_ )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(**a_ )
vae.load_state_dict(a_ )
vae.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowerCamelCase__ : Any = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 710 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
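    # Worked example for the table flattening above: TabFact's raw format uses
    # "#" as the column separator and newlines as row separators, so a string
    # like "city#country\nParis#France" becomes a one-row DataFrame with
    # columns ["city", "country"], which the tokenizer then linearizes
    # together with the statement.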
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and the Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
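# A minimal invocation sketch for the script above (the script name and model
# id are assumptions for illustration; the flags mirror the argument classes
# parsed in main()):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval --do_predict \
#       --output_dir ./tabfact_outputs \
#       --overwrite_output_dir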
| 18 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """megatron-bert"""
def __init__( self :Optional[Any] , lowerCamelCase_ :Optional[Any]=2_90_56 , lowerCamelCase_ :Tuple=10_24 , lowerCamelCase_ :str=24 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=40_96 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :Dict=0.0_2 , lowerCamelCase_ :List[str]=1E-12 , lowerCamelCase_ :str=0 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=True , **lowerCamelCase_ :int , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : int = use_cache
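# Usage sketch (illustrative, mirroring the upstream MegatronBertConfig API):
# like any PretrainedConfig subclass, the config above can be built with
# defaults, selected hyperparameters overridden, and round-tripped via JSON.
#
#   config = lowercase__(num_hidden_layers=12, hidden_size=768)
#   print(config.to_json_string())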
| 711 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
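    # Worked example with the tester defaults above (image_size=32,
    # patch_size=2, depths=[1, 2, 1], embed_dim=16): the patch grid holds
    # (32 // 2) ** 2 = 256 tokens, each of the two patch-merging stages
    # divides the token count by 4 and doubles the channel dim, so the
    # expected sequence length is 256 // 4 ** 2 = 16 and the expected hidden
    # size is 16 * 2 ** 2 = 64.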
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
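    # Shape note for the checks above: Swin-style attention is computed inside
    # local windows, so with the tester defaults (num_heads=[2, 2, 4],
    # window_size=2) the first attention map has trailing shape
    # [2, 2 ** 2, 2 ** 2] = [2, 4, 4] rather than seq_len x seq_len.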
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
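# The test above follows the standard vision integration pattern: load the
# processor and checkpoint, run a fixture image, then compare a small logits
# slice against pre-recorded values with a loose tolerance. Condensed sketch
# (model id taken from the test):
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#   model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#   logits = model(**processor(images=image, return_tensors="pt")).logits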
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : int = DDIMScheduler()
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_vq_model
SCREAMING_SNAKE_CASE : Dict = LDMPipeline(unet=lowerCamelCase_ , vqvae=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ldm(generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = ldm(generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' , return_dict=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
SCREAMING_SNAKE_CASE : Dict = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = ldm(generator=lowerCamelCase_ , num_inference_steps=5 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
SCREAMING_SNAKE_CASE : str = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
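# Condensed inference sketch for the pipeline exercised above (model id taken
# from the test; the step count is illustrative):
#
#   ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = ldm(num_inference_steps=50, output_type="numpy").images[0]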
| 712 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
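# Usage sketch (illustrative, mirroring the upstream BlenderbotSmallConfig
# API): the config above exposes the encoder/decoder hyperparameters as
# attributes after construction, e.g.
#
#   config = lowercase__(encoder_layers=2, decoder_layers=2, d_model=256)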
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
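    # Shape note for the dummy cache built above: each past_key_values entry
    # holds four tensors (decoder self-attention key/value plus cross-attention
    # key/value), shaped (batch, num_heads, seq_len, hidden_size // num_heads).
    # With the upstream defaults (d_model=512, 16 decoder attention heads) the
    # head dimension is 512 // 16 = 32.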
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
        # Generate dummy inputs according to the computed batch and sequence lengths
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
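    # Worked example for the dynamic-axis handling above (values assume the
    # upstream OnnxConfig defaults): batch_size=-1 falls back to
    # default_fixed_batch (2) and seq_length=-1 falls back to
    # default_fixed_sequence (8) minus the tokenizer's special tokens, so ONNX
    # export always traces with a small fixed input shape.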
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
    return max(metric_fn(a_ , gt ) for gt in ground_truths )
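# Worked example (illustrative, using the imported exact_match_score as
# metric_fn): scoring a prediction against references ["paris", "Lyon"]
# keeps the best per-reference score, so a prediction only needs to match one
# of the gold answers to receive full credit.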
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
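# A minimal invocation sketch (script name, paths and model id are
# illustrative; the flags mirror the parser defined in get_args()):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set questions.txt \
#       --gold_data_path gold.tsv \
#       --predictions_path preds.txt \
#       --eval_mode e2e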
| 713 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
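# A minimal standalone sketch (not from the original module) of the trick used
# above: `compute_effective_axis_dimension` swaps a dynamic (-1) axis for a
# small fixed size so the ONNX exporter cannot specialize the graph, assuming
# the usual OnnxConfig defaults of 2 samples and 8 tokens.
def _effective_dim(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # dynamic axes are conventionally marked with -1 (or 0)
        dimension = fixed_dimension
    return dimension - num_token_to_add  # leave room for special tokens

assert _effective_dim(-1, fixed_dimension=2) == 2                      # dynamic batch -> 2
assert _effective_dim(-1, fixed_dimension=8, num_token_to_add=2) == 6  # seq minus [CLS]/[SEP]
assert _effective_dim(16, fixed_dimension=8) == 16                     # static axes pass through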
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __A ( a_ : list[int] , a_ : tuple[int, ...] )-> str | None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
for keychar, cipherchar in zip(cycle(a_ ) , a_ ):
SCREAMING_SNAKE_CASE : List[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(a_ )
return decoded
def __A ( a_ : list[int] )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[str] = []
for key in product(a_ , repeat=3 ):
SCREAMING_SNAKE_CASE : Optional[Any] = try_key(a_ , a_ )
if encoded is not None:
possibles.append(a_ )
return possibles
def __A ( a_ : list[str] , a_ : str )-> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def __A ( a_ : str = "p059_cipher.txt" )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[int]
SCREAMING_SNAKE_CASE : list[str]
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str = Path(a_ ).parent.joinpath(a_ ).read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [int(a_ ) for number in data.strip().split(''',''' )]
SCREAMING_SNAKE_CASE : List[Any] = filter_valid_chars(a_ )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE : Any = filter_common_word(a_ , a_ )
if len(a_ ) == 1:
break
SCREAMING_SNAKE_CASE : Tuple = possibles[0]
return sum(ord(a_ ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
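# A minimal standalone sketch (separate from the solution above) of the
# property the search exploits: XOR is its own inverse, so (c ^ k) ^ k == c,
# and a repeating key is applied with the same `cycle` pairing. The key "god"
# here is purely illustrative.
from itertools import cycle as _cycle

_plain = "the quick brown fox"
_key = [ord(ch) for ch in "god"]
_cipher = [ord(ch) ^ k for ch, k in zip(_plain, _cycle(_key))]
assert "".join(chr(c ^ k) for c, k in zip(_cipher, _cycle(_key))) == _plain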
| 714 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
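# A hand-checkable sketch (separate from the function above) of why the jump
# width is ~sqrt(n): hop block by block until the block's last element reaches
# the target, then scan linearly inside that one block.
import math as _math

_sorted = [0, 1, 3, 4, 7, 9, 11, 15, 22, 30]
_block = int(_math.sqrt(len(_sorted)))                          # 3 for n == 10
assert _sorted[_block - 1] < 11                                 # block 1 ends at index 2, keep hopping
assert _sorted[2 * _block - 1] < 11 <= _sorted[3 * _block - 1]  # target lies in block 3
assert _sorted.index(11) == 6                                   # linear scan inside block 3 finds it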
| 18 | 0 |
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 1
@register_to_config
def __init__( self :Dict , lowerCamelCase_ :Optional[Any]=20_00 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Dict=20 , lowerCamelCase_ :Union[str, Any]=1E-3 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, torch.device] = None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase_ , device=lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ) -> List[Any]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE : Optional[int] = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE : Dict = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE : Optional[Any] = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = -score / std
# compute
SCREAMING_SNAKE_CASE : Union[str, Any] = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE : Optional[int] = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : int = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sqrt(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE : Optional[Any] = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE : str = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase_ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE : Dict = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :Dict ) -> Any:
'''simple docstring'''
return self.config.num_train_timesteps
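# A self-contained numerical sketch (not the scheduler API) of the single
# Euler-Maruyama update performed above for the reverse-time VP-SDE:
#   x_mean = x + (-0.5 * beta(t) * x - beta(t) * score) * dt
#   x      = x_mean + sqrt(beta(t)) * sqrt(|dt|) * z
# The beta range and the stand-in score (-x, the score of a standard normal)
# are illustrative values only.
import torch

def _euler_maruyama_step(x, score, t, dt, beta_min=0.1, beta_max=20.0):
    beta_t = beta_min + t * (beta_max - beta_min)
    drift = -0.5 * beta_t * x - beta_t * score               # g(t)**2 == beta_t
    x_mean = x + drift * dt                                  # deterministic part
    noise = torch.randn_like(x)
    return x_mean + beta_t ** 0.5 * abs(dt) ** 0.5 * noise   # stochastic part

_x = torch.randn(2, 3)
_x = _euler_maruyama_step(_x, score=-_x, t=0.5, dt=-1e-3)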
| 715 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 0 |
"""simple docstring"""
lowerCamelCase__ : Dict = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
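# A minimal standalone sketch (not the actual diffusers machinery) of the
# soft-dependency pattern repeated above: probe for a backend, and on failure
# export a placeholder that only raises when it is actually used.
import importlib.util

def _require_backend(name: str) -> None:
    if importlib.util.find_spec(name) is None:
        raise ImportError(f"{name} is not installed")

try:
    _require_backend("torch")
except ImportError:
    class _DummyPipeline:  # stands in for the dummy_*_objects modules
        def __init__(self, *args, **kwargs):
            raise ImportError("Install torch to use this pipeline")
else:
    pass  # the real `from .pipelines import ...` would run here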
| 716 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
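# A worked instance (independent of the code above) of the "digit cancelling"
# predicate: 49/98 really does equal 4/8 after naively striking the shared 9.
assert 49 % 10 == 98 // 10 == 9           # shared digit: last of num, first of den
assert (49 // 10) / (98 % 10) == 49 / 98  # 4/8 == 0.5 == 49/98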
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
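# A quick standalone check (illustrative values only) of the two derived
# attributes computed above: Swin doubles the channel dim once per stage after
# the first, and the stage names are "stem" plus one entry per depth.
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768  # 96 * 2**3
assert ["stem"] + [f"stage{i}" for i in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4"
]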
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ["MaskFormerFeatureExtractor"]
lowerCamelCase__ : Tuple = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
lowerCamelCase__ : int = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 718 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
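# A hand-checkable standalone instance of the same triple-loop relaxation:
# after intermediate vertex k is processed, dist[i][j] holds the shortest
# i -> j path using only intermediates {0..k}.
_INF = float("inf")
_dist = [[0, 4, _INF],
         [_INF, 0, 1],
         [2, _INF, 0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            _dist[i][j] = min(_dist[i][j], _dist[i][k] + _dist[k][j])
assert _dist[0][2] == 5  # 0 -> 1 -> 2 beats the missing direct edge
assert _dist[1][0] == 3  # 1 -> 2 -> 0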
| 18 | 0 |
"""simple docstring"""
lowerCamelCase__ : List[Any] = 256
# Modulus to hash a string
lowerCamelCase__ : Union[str, Any] = 1000003
def __A ( a_ : str , a_ : str )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = len(a_ )
if p_len > t_len:
return False
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
SCREAMING_SNAKE_CASE : Dict = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
SCREAMING_SNAKE_CASE : int = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
SCREAMING_SNAKE_CASE : Tuple = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '''abc1abc12'''
SCREAMING_SNAKE_CASE : str = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
SCREAMING_SNAKE_CASE : List[Any] = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ )
# Test 2)
SCREAMING_SNAKE_CASE : List[str] = '''ABABX'''
SCREAMING_SNAKE_CASE : Optional[int] = '''ABABZABABYABABX'''
assert rabin_karp(a_ , a_ )
# Test 3)
SCREAMING_SNAKE_CASE : List[Any] = '''AAAB'''
SCREAMING_SNAKE_CASE : int = '''ABAAAAAB'''
assert rabin_karp(a_ , a_ )
# Test 4)
SCREAMING_SNAKE_CASE : Tuple = '''abcdabcy'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(a_ , a_ )
# Test 5)
SCREAMING_SNAKE_CASE : List[Any] = '''Lü'''
SCREAMING_SNAKE_CASE : List[str] = '''Lüsai'''
assert rabin_karp(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = '''Lue'''
assert not rabin_karp(a_ , a_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
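# A toy base-10 version (separate from the modular code above) of the rolling
# hash update: drop the leading "character", shift, append the next one.
_digits, _window = [3, 1, 4, 1, 5], 3
_h = 0
for d in _digits[:_window]:
    _h = _h * 10 + d             # hash of "314"
assert _h == 314
_power = 10 ** (_window - 1)     # weight of the digit being dropped
_h = (_h - _digits[0] * _power) * 10 + _digits[_window]  # slide to "141"
assert _h == 141                 # the real code does the same mod a large prime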
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """dpr"""
def __init__( self :Optional[int] , lowerCamelCase_ :Dict=3_05_22 , lowerCamelCase_ :Any=7_68 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[Any]=2 , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :Optional[Any]=1E-12 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :Any="absolute" , lowerCamelCase_ :int = 0 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = projection_dim
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
| 720 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
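# A standalone sketch (plain tensors, no models) of the residual merge in
# `forward` above: the first controlnet initializes the accumulators, later
# ones are summed into them elementwise.
import torch

_outputs = [([torch.ones(2, 4)], torch.full((2, 4), 2.0)) for _ in range(3)]
_down_acc, _mid_acc = None, None
for _i, (_down, _mid) in enumerate(_outputs):
    if _i == 0:
        _down_acc, _mid_acc = _down, _mid
    else:
        _down_acc = [a + b for a, b in zip(_down_acc, _down)]
        _mid_acc = _mid_acc + _mid
assert torch.equal(_down_acc[0], torch.full((2, 4), 3.0))
assert torch.equal(_mid_acc, torch.full((2, 4), 6.0))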
| 18 | 0 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase__ : str = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""input_values""", """attention_mask"""]
def __init__( self :List[Any] , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 1_60_00 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :bool = False , lowerCamelCase_ :int = 80 , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :str = "hann_window" , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :float = 80 , lowerCamelCase_ :float = 76_00 , lowerCamelCase_ :float = 1E-10 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Dict , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : int = return_attention_mask
SCREAMING_SNAKE_CASE : Tuple = num_mel_bins
SCREAMING_SNAKE_CASE : Tuple = hop_length
SCREAMING_SNAKE_CASE : str = win_length
SCREAMING_SNAKE_CASE : Any = win_function
SCREAMING_SNAKE_CASE : int = frame_signal_scale
SCREAMING_SNAKE_CASE : List[Any] = fmin
SCREAMING_SNAKE_CASE : Optional[int] = fmax
SCREAMING_SNAKE_CASE : Any = mel_floor
SCREAMING_SNAKE_CASE : Optional[Any] = reduction_factor
SCREAMING_SNAKE_CASE : Dict = win_length * sampling_rate // 10_00
SCREAMING_SNAKE_CASE : List[str] = hop_length * sampling_rate // 10_00
SCREAMING_SNAKE_CASE : str = optimal_fft_length(self.sample_size )
SCREAMING_SNAKE_CASE : Optional[int] = (self.n_fft // 2) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCamelCase_ , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCamelCase_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( lowerCamelCase_ :List[np.ndarray] , lowerCamelCase_ :List[np.ndarray] , lowerCamelCase_ :float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
SCREAMING_SNAKE_CASE : Any = np.array(lowerCamelCase_ , np.intaa )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for vector, length in zip(lowerCamelCase_ , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE : str = padding_value
normed_input_values.append(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __lowerCAmelCase ( self :str , lowerCamelCase_ :np.ndarray , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
lowerCamelCase_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self :Optional[int] , lowerCamelCase_ :Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCamelCase_ :Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCamelCase_ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[int] = None , **lowerCamelCase_ :List[Any] , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
SCREAMING_SNAKE_CASE : int = self._process_audio(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ , )
else:
SCREAMING_SNAKE_CASE : int = None
if audio_target is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._process_audio(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ , )
if inputs is None:
return inputs_target
else:
SCREAMING_SNAKE_CASE : Tuple = inputs_target['''input_values''']
SCREAMING_SNAKE_CASE : Any = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_attention_mask
return inputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :bool = False , lowerCamelCase_ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , **lowerCamelCase_ :Optional[int] , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Tuple = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[int] = speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [speech]
# needed to make pad() work on spectrogram inputs
SCREAMING_SNAKE_CASE : Any = self.feature_size
# convert into correct format for padding
if is_target:
SCREAMING_SNAKE_CASE : Any = [self._extract_mel_features(lowerCamelCase_ ) for waveform in speech]
SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature({'''input_values''': features} )
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_mel_bins
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({'''input_values''': speech} )
SCREAMING_SNAKE_CASE : Any = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Any = feature_size_hack
# convert input values to correct format
SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCamelCase_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
SCREAMING_SNAKE_CASE : List[str] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCamelCase_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : List[str] = input_values.astype(np.floataa )
# convert attention_mask to correct format
SCREAMING_SNAKE_CASE : Tuple = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
SCREAMING_SNAKE_CASE : str = (
attention_mask
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE : List[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=lowerCamelCase_ , padding_value=self.padding_value )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Any = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
def __lowerCAmelCase ( self :Optional[int] ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
SCREAMING_SNAKE_CASE : Any = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
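# The core of `zero_mean_unit_var_norm` above on a single padded vector: the
# mean and variance are taken over the valid region only, then applied
# everywhere. A standalone check with made-up values:
import numpy as np

_x = np.array([1.0, 2.0, 3.0, 0.0, 0.0])  # last two entries are padding
_length = 3
_normed = (_x - _x[:_length].mean()) / np.sqrt(_x[:_length].var() + 1e-7)
np.testing.assert_allclose(_normed[:_length].mean(), 0.0, atol=1e-6)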
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
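# The simplest possible equilibrium (independent of the tests above): two
# equal and opposite collinear forces. numpy's 2-D cross product returns the
# scalar z-moment of each r x F pair, and their sum must vanish.
from numpy import array as _array, cross as _cross

_forces = _array([[1.0, 0.0], [-1.0, 0.0]])
_location = _array([[0.0, 0.0], [0.0, 0.0]])
assert abs(sum(_cross(_location, _forces))) < 1e-1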
| 18 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def __A ( a_ : str , a_ : Optional[Any]=1_00 , a_ : Dict=" " )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = text.split(a_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(a_ ) , a_ )]
def __A ( a_ : dict )-> dict:
'''simple docstring'''
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(a_ ):
titles.append(title if title is not None else '''''' )
texts.append(a_ )
return {"title": titles, "text": texts}
def __A ( a_ : dict , a_ : DPRContextEncoder , a_ : DPRContextEncoderTokenizerFast )-> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=a_ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[Any] = ctx_encoder(input_ids.to(device=a_ ) , return_dict=a_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __A ( a_ : "RagExampleArguments" , a_ : "ProcessingArguments" , a_ : "IndexHnswArguments" , )-> Optional[int]:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE : int = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE : List[str] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE : str = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_ ) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
SCREAMING_SNAKE_CASE : Dict = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(a_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE : Optional[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=a_ )
# And save the index
SCREAMING_SNAKE_CASE : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(a_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowercase__:
UpperCamelCase = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
UpperCamelCase = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
UpperCamelCase = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
UpperCamelCase = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class lowercase__:
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
UpperCamelCase = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class lowercase__:
UpperCamelCase = field(
default=7_68 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
    m: int = field(
default=1_28 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate( item : str , main_target : str )-> tuple[str, float]:
    '''simple docstring'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
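# Worked example (hypothetical values): with main_target "Hello World", the item
# "Helxo Worxd" matches 9 of the 11 positions, so evaluate returns
# ("Helxo Worxd", 9.0).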
def crossover( parent_a : str , parent_b : str )-> tuple[str, str]:
    '''simple docstring'''
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
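# Example: crossover("aaaa", "bbbb") with random_slice == 2 yields ("aabb", "bbaa"):
# each child takes one parent's prefix and the other parent's suffix.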
def mutate( child : str , genes : list[str] )-> str:
    '''simple docstring'''
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , )-> list[str]:
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        # Draw the second parent from the strongest N_SELECTED candidates.
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a, child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic( target : str , genes : list[str] , debug : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
        msg = F"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg )
# Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
        msg = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
    genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation, population, target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self :List[Any] , parent :Dict , batch_size :List[Any]=7 , num_channels :str=3 , min_resolution :Optional[Any]=30 , max_resolution :Dict=4_00 , do_resize :Any=True , size :List[str]=None , do_normalize :Union[str, Any]=True , image_mean :List[Any]=[0.5, 0.5, 0.5] , image_std :List[str]=[0.5, 0.5, 0.5] , do_rescale :List[Any]=True , rescale_factor :Tuple=1 / 2_55 , do_pad :Any=True , ) -> List[str]:
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self :List[Any] , image_inputs :Union[str, Any] , batched :Union[str, Any]=False ) -> Tuple:
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                w, h = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self :List[Any] ) -> Tuple:
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , False )
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : int = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE : Any = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
SCREAMING_SNAKE_CASE : List[str] = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCamelCase_ ) )
# verify boxes
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCamelCase_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCamelCase_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCamelCase_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCamelCase_ ) )
# verify orig_size
SCREAMING_SNAKE_CASE : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCamelCase_ ) )
# verify size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCamelCase_ ) )
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
SCREAMING_SNAKE_CASE : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE : int = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE : str = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , masks_path=lowerCamelCase_ , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCamelCase_ ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCamelCase_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCamelCase_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCamelCase_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCamelCase_ ) )
# verify masks
SCREAMING_SNAKE_CASE : Union[str, Any] = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCamelCase_ )
# verify orig_size
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCamelCase_ ) )
# verify size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCamelCase_ ) )
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn : List[Any] , prediction : Optional[int] , ground_truths : Optional[int] )-> Dict:
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
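# Example: with exact_match_score as metric_fn, prediction "Paris" and
# ground_truths ["paris", "Paris, France"], the best per-reference score is kept,
# so matching any single reference is enough to score the sample.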
def get_scores( args : List[Any] , preds_path : Union[str, Any] , gold_data_path : str )-> str:
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F"F1: {fa:.2f}" )
    logger.info(F"EM: {em:.2f}" )
def get_precision_at_k( args : Any , preds_path : Any , gold_data_path : List[Any] )-> Tuple:
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval( args : Any , rag_model : List[str] , questions : str )-> int:
    '''simple docstring'''
    def strip_title(title : Optional[Any] ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_eae( args : List[Any] , rag_model : int , questions : str )-> Tuple:
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args( )-> int:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main( args : Optional[Any] )-> int:
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
"""simple docstring"""
from __future__ import annotations
def mean( nums : list )-> float:
'''simple docstring'''
if not nums:
raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
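# Example: mean([3, 6, 9]) returns 6.0; an empty list raises ValueError.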
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self :int , vocab_file :Optional[int] , unk_token :Optional[int]="[GO]" , bos_token :int="[GO]" , eos_token :str="[s]" , pad_token :Dict="[GO]" , **kwargs :List[str] ) -> Tuple:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
    def vocab_size( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
    def get_vocab( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self :List[str] , text :Optional[Any] ) -> List[str]:
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
    def save_vocabulary( self :Optional[int] , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
return (vocab_file,)
from math import sqrt
def solution( limit : int = 1_00_00_00 )-> int:
'''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
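# For an a x b x c cuboid with longest side M, the shortest surface path has
# length sqrt((a + b)**2 + M**2); the inner update above counts, for each integer
# split a + b = sum_shortest_sides, how many pairs (a, b) with 1 <= a <= b <= M
# give an integer path length.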
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """luke"""
    def __init__( self :Optional[int] , vocab_size :Union[str, Any]=5_02_67 , entity_vocab_size :int=50_00_00 , hidden_size :Tuple=7_68 , entity_emb_size :List[str]=2_56 , num_hidden_layers :Dict=12 , num_attention_heads :Optional[int]=12 , intermediate_size :Optional[Any]=30_72 , hidden_act :List[Any]="gelu" , hidden_dropout_prob :Any=0.1 , attention_probs_dropout_prob :Union[str, Any]=0.1 , max_position_embeddings :List[str]=5_12 , type_vocab_size :Optional[int]=2 , initializer_range :Tuple=0.0_2 , layer_norm_eps :Optional[int]=1E-12 , use_entity_aware_attention :List[Any]=True , classifier_dropout :List[str]=None , pad_token_id :str=1 , bos_token_id :Any=0 , eos_token_id :str=2 , **kwargs :List[Any] , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
lowerCamelCase__ : int = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial( )-> List[Any]:
'''simple docstring'''
    plt.scatter(X , y , color='''red''' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
"""simple docstring"""
def search( list_data : list , key : int , left : int = 0 , right : int = 0 )-> int:
'''simple docstring'''
    right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data , key , left + 1 , right - 1 )
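# Example: search([0, 5, 7, 10, 15], 5) compares both ends of the window first,
# then recurses to (left=1, right=3) and returns index 1.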
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def find_max( nums : list[int | float] , left : int , right : int )-> int | float:
'''simple docstring'''
    if len(nums ) == 0:
raise ValueError('''find_max() arg is an empty sequence''' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
raise IndexError('''list index out of range''' )
if left == right:
return nums[left]
    mid = (left + right) >> 1 # the middle
    left_max = find_max(nums , left , mid ) # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
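# Example: find_max([1, 5, 3, 7], 0, 3) splits at mid == 1 into [1, 5] and [3, 7],
# takes the larger of the two sub-results, and returns 7.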
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
def prime_sieve_eratosthenes( num : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
while p * p <= num:
if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
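# Example: prime_sieve_eratosthenes(10) crosses out 4, 6, 8, 9 and 10,
# leaving [2, 3, 5, 7].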
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 )-> float:
'''simple docstring'''
    x = a
while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
# This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
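# Each iteration applies the update x_{n+1} = x_n - f(x_n) / f'(x_n); Decimal
# arithmetic keeps the iterates stable near the default 1e-10 stopping tolerance.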
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
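# With the lazy module in place, `from transformers.models.funnel import FunnelModel`
# only imports the torch- or TF-backed submodule on first attribute access.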
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers : Union[str, Any]=None )-> Any:
'''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
        '''--config_file''' , default=None , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command( args : Optional[int] )-> Optional[Any]:
'''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"--config_file={args.config_file} {script_name}"
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def main( )-> Dict:
'''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
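# Typical usage (via the accelerate CLI): `accelerate test [--config_file path]`,
# which launches test_script.py through `accelerate-launch` to verify the setup.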
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
| 18 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
    def forward( self :Any , sample :torch.FloatTensor , timestep :Union[torch.Tensor, float, int] , encoder_hidden_states :torch.Tensor , controlnet_cond :List[torch.tensor] , conditioning_scale :List[float] , class_labels :Optional[torch.Tensor] = None , timestep_cond :Optional[torch.Tensor] = None , attention_mask :Optional[torch.Tensor] = None , cross_attention_kwargs :Optional[Dict[str, Any]] = None , guess_mode :bool = False , return_dict :bool = True , ) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
# merge samples
if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
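    # The per-ControlNet residual lists are summed elementwise above, so each
    # additional ControlNet contributes additively to the UNet down/mid features.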
    def save_pretrained( self :Any , save_directory :Union[str, os.PathLike] , is_main_process :bool = True , save_function :Callable = None , safe_serialization :bool = False , variant :Optional[str] = None , ) -> Optional[int]:
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def from_pretrained( cls :Dict , pretrained_model_path :Optional[Union[str, os.PathLike]] , **kwargs :Tuple ) -> List[Any]:
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encodec"""
    def __init__( self :List[str] , target_bandwidths :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , sampling_rate :str=2_40_00 , audio_channels :Any=1 , normalize :List[Any]=False , chunk_length_s :Optional[int]=None , overlap :Optional[Any]=None , hidden_size :str=1_28 , num_filters :Any=32 , num_residual_layers :int=1 , upsampling_ratios :Dict=[8, 5, 4, 2] , norm_type :List[Any]="weight_norm" , kernel_size :Optional[int]=7 , last_kernel_size :Tuple=7 , residual_kernel_size :Optional[Any]=3 , dilation_growth_rate :int=2 , use_causal_conv :Dict=True , pad_mode :Optional[int]="reflect" , compress :Optional[int]=2 , num_lstm_layers :Union[str, Any]=2 , trim_right_ratio :Dict=1.0 , codebook_size :Any=10_24 , codebook_dim :str=None , use_conv_shortcut :Union[str, Any]=True , **kwargs :Optional[int] , ) -> Dict:
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs )
@property
    def chunk_length( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self :Dict ) -> int:
'''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
'''simple docstring'''
    def __init__( self :Dict , parent :Optional[int] , batch_size :List[Any]=13 , seq_length :int=7 , is_training :Tuple=True , use_input_mask :Union[str, Any]=True , use_token_type_ids :int=True , use_labels :Union[str, Any]=True , vocab_size :str=99 , hidden_size :Optional[Any]=64 , embedding_size :Optional[Any]=32 , num_hidden_layers :Union[str, Any]=5 , num_attention_heads :Optional[int]=4 , intermediate_size :List[Any]=37 , hidden_act :str="gelu" , hidden_dropout_prob :Any=0.1 , attention_probs_dropout_prob :Dict=0.1 , max_position_embeddings :Tuple=5_12 , type_vocab_size :Optional[Any]=16 , type_sequence_label_size :Optional[Any]=2 , initializer_range :str=0.0_2 , num_labels :List[str]=3 , num_choices :Optional[Any]=4 , scope :Tuple=None , ) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
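    # Shape walk-through for the multiple-choice expansion above (illustrative,
    # using this tester's defaults batch_size=13, num_choices=4, seq_length=7):
    #   input_ids          (13, 7)
    #   .unsqueeze(1)      (13, 1, 7)
    #   .expand(-1, 4, -1) (13, 4, 7)  -> the head scores each choice, logits (13, 4)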
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
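    # Minimal sketch of the element-wise tolerance check used above (illustrative;
    # the tensors and expected values are placeholders, not checkpoint outputs):
    #
    #   output = model(input_ids)[0]          # shape (1, 9, 1024), fp16
    #   for ii in range(3):
    #       for jj in range(3):
    #           a, b = output[0, ii, jj], expected[3 * ii + jj]
    #           assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE)
    #
    # math.isclose passes when |a - b| <= max(rel_tol * max(|a|, |b|), abs_tol), so
    # TOLERANCE = 1e-4 allows both a relative and an absolute slack of 1e-4.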
| 709 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
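    # Worked example with the defaults above (image_size=4, patch_size=2,
    # text_seq_length=7): image_seq_length = (4 // 2) ** 2 + 1 = 5 (four patches
    # plus the CLS token), so seq_length = 7 + 5 = 12.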
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        '''simple docstring'''
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        '''simple docstring'''
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        return True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
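    # Note on the toy inputs above: LayoutLMv3 expects one (x0, y0, x1, y1) box per
    # input id, normalized to a 0-1000 grid, and unsqueeze(0) turns the (2, 4) box
    # tensor into a (1, 2, 4) batch matching input_ids of shape (1, 2).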
| 18 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n                This includes models such as gpt2, causal variations of bert,\n                causal versions of t5, and more (the full list can be found\n                in the AutoModelForCausalLM documentation here:\n                https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        '''simple docstring'''
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 710 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd
        statement = examples["statement"]
        table = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(table, statement, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
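    # Illustrative example of the flattened TabFact table format consumed by
    # _convert_table_text_to_pandas above (made-up content): the first
    # '#'-separated row is the header, the remaining rows are the records.
    #
    #   "city#population\nparis#2161000\nberlin#3645000"
    #   ->      city  population
    #      0   paris     2161000
    #      1  berlin     3645000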
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
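# Example invocation (hypothetical paths and hyperparameters, for illustration only):
#
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --do_train --do_eval \
#     --max_seq_length 1024 \
#     --per_device_train_batch_size 8 \
#     --learning_rate 3e-5 \
#     --output_dir ./tapex-tabfact \
#     --overwrite_output_dir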
| 18 | 0 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    '''simple docstring'''
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
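# Usage sketch (illustrative repo/file names): builds a resolvable URL for a file
# inside a dataset repo, URL-encoding the path on huggingface_hub releases older
# than 0.11.0.
#
#   hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
#   -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv"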
| 711 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, layer_norm_eps=1e-5, initializer_range=0.02, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
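        # Shape note (sketch): every Swin stage after the first merges 2x2 patches, so the
        # token count shrinks 4x per stage while the channel width doubles; the two
        # expressions above encode exactly that for len(depths) stages.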
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
            # check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
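            # Shape note (sketch): Swinv2 attends within non-overlapping windows, so each
            # attention map is (window_size**2 x window_size**2) per head (e.g. 64x64 for
            # window_size=8), regardless of the overall image resolution.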
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase__ : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCamelCase__ : List[str] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __A ( )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
SCREAMING_SNAKE_CASE : int = bs[:]
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a_ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE : Optional[int] = [chr(a_ ) for n in cs]
return dict(zip(a_ , a_ ) )
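# Hedged usage sketch: `bytes_to_unicode` gives every byte value 0-255 a printable
# unicode character so the BPE below never sees raw control bytes or whitespace.
# Printable ASCII maps to itself; other bytes are shifted past code point 255.
# The values below assume the standard GPT-2 table this helper reproduces:
# >>> table = bytes_to_unicode()
# >>> table[ord("h")]
# 'h'
# >>> table[0]  # a control byte is remapped to a printable character
# 'Ā'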
def __A ( a_ : Any )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = set()
SCREAMING_SNAKE_CASE : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE : Any = char
return pairs
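# Minimal sketch: `get_pairs` collects the adjacent symbol pairs of a word tuple,
# which the BPE loop then ranks against the merge table:
# >>> sorted(get_pairs(("l", "o", "w", "er")))
# [('l', 'o'), ('o', 'w'), ('w', 'er')]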
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :int="replace" , lowerCamelCase_ :Tuple="<s>" , lowerCamelCase_ :List[str]="</s>" , lowerCamelCase_ :List[str]="</s>" , lowerCamelCase_ :Tuple="<s>" , lowerCamelCase_ :List[str]="<unk>" , lowerCamelCase_ :Dict="<pad>" , lowerCamelCase_ :Any="<mask>" , lowerCamelCase_ :int=False , **lowerCamelCase_ :Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : str = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE : List[str] = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
return len(self.encoder )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :int ) -> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : List[Any] = tuple(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : Optional[Any] = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE : List[Any] = bigram
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : str = 0
while i < len(lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE : Tuple = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : str = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Any = tuple(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Optional[int] = get_pairs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = ''' '''.join(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = word
return word
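        # Worked trace (toy merge table, not the shipped merges.txt): with
        # bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, the loop above rewrites
        # ("l", "o", "w") -> ("lo", "w") -> ("low",): the lowest-ranked adjacent
        # pair merges first, and the loop stops once no remaining pair is ranked.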
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for token in re.findall(self.pat , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Optional[int] ) -> Dict:
'''simple docstring'''
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
SCREAMING_SNAKE_CASE : Any = 0
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : lowerCamelCase_[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
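        # Format sketch (assuming the standard LED/RoBERTa ids <s>=0, </s>=2): a single
        # sequence becomes `<s> A </s>` and a pair `<s> A </s></s> B </s>`, e.g.
        # [10, 11] -> [0, 10, 11, 2] and ([10, 11], [12]) -> [0, 10, 11, 2, 2, 12, 2].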
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None , lowerCamelCase_ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self :int , lowerCamelCase_ :int , lowerCamelCase_ :List[Any]=False , **lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : List[Any] = ''' ''' + text
return (text, kwargs)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : List[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : str = len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : Any = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Any = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
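        # Padding sketch (illustrative values): with padding_side == "right" and a
        # sequence padded from length 3 to 5, the global attention mask grows with -1
        # entries, since 0 already means "local attention" for LED:
        #     [1, 0, 0]  ->  [1, 0, 0, -1, -1]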
| 712 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
                # The encoder and decoder may have different depths: fill the shared layers first; the deeper side is padded below
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
        # Generate dummy inputs according to the computed batch and sequence sizes
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
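        # Sketch of the effective-axis logic (values illustrative): a dynamic axis is
        # requested as -1 and swapped for a small fixed size so ONNX tracing cannot
        # specialize on it, e.g. batch -1 -> 2 and sequence -1 -> 8 minus the special
        # tokens the tokenizer adds back; a concrete size is passed through unchanged.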
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mctct"""
def __init__( self :str , lowerCamelCase_ :Dict=80_65 , lowerCamelCase_ :Optional[Any]=15_36 , lowerCamelCase_ :int=36 , lowerCamelCase_ :List[str]=61_44 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :Tuple=3_84 , lowerCamelCase_ :str=9_20 , lowerCamelCase_ :List[str]=1E-5 , lowerCamelCase_ :Optional[int]=0.3 , lowerCamelCase_ :str="relu" , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :str=0.3 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :str=2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Optional[int]=(7,) , lowerCamelCase_ :List[str]=(3,) , lowerCamelCase_ :Dict=80 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Any=None , lowerCamelCase_ :str="sum" , lowerCamelCase_ :List[Any]=False , **lowerCamelCase_ :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : str = layerdrop
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
SCREAMING_SNAKE_CASE : int = conv_glu_dim
SCREAMING_SNAKE_CASE : str = conv_dropout
SCREAMING_SNAKE_CASE : Tuple = num_conv_layers
SCREAMING_SNAKE_CASE : int = input_feat_per_channel
SCREAMING_SNAKE_CASE : Optional[Any] = input_channels
SCREAMING_SNAKE_CASE : Any = conv_channels
SCREAMING_SNAKE_CASE : Tuple = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Any = ctc_zero_infinity
# prevents config testing fail with exporting to json
SCREAMING_SNAKE_CASE : Dict = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = list(lowerCamelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 713 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
        # Generate dummy inputs according to the computed batch and sequence sizes
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 | 0 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCamelCase__ : Any = get_logger(__name__)
lowerCamelCase__ : Dict = Path(__file__).parent / "model_card_template.md"
lowerCamelCase__ : Optional[int] = uuida().hex
lowerCamelCase__ : Tuple = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase__ : Any = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase__ : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def __A ( a_ : Union[Dict, str, None] = None )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"; torch/{_torch_version}"
if is_flax_available():
ua += F"; jax/{_jax_version}"
ua += F"; flax/{_flax_version}"
if is_onnx_available():
ua += F"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_ ):
ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(a_ , a_ ):
ua += "; " + user_agent
return ua
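# Example of an assembled user-agent string (all versions illustrative):
# "diffusers/0.18.0; python/3.10.12; session_id/1a2b3c; torch/2.0.1; is_ci/true"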
def __A ( a_ : str , a_ : Optional[str] = None , a_ : Optional[str] = None )-> str:
'''simple docstring'''
if token is None:
SCREAMING_SNAKE_CASE : Tuple = HfFolder.get_token()
if organization is None:
SCREAMING_SNAKE_CASE : Any = whoami(a_ )['''name''']
return F"{username}/{model_id}"
else:
return F"{organization}/{model_id}"
def __A ( a_ : Union[str, Any] , a_ : str )-> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(a_ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
SCREAMING_SNAKE_CASE : Any = args.hub_token if hasattr(a_ , '''hub_token''' ) else None
SCREAMING_SNAKE_CASE : Optional[int] = get_full_repo_name(a_ , token=a_ )
SCREAMING_SNAKE_CASE : List[str] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.output_dir , '''README.md''' )
model_card.save(a_ )
def __A ( a_ : Optional[str] , a_ : Optional[str] = None )-> int:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
SCREAMING_SNAKE_CASE : str = str(Path(a_ ).as_posix() )
SCREAMING_SNAKE_CASE : Any = re.search(r'''snapshots/([^/]+)/''' , a_ )
if search is None:
return None
SCREAMING_SNAKE_CASE : List[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_ ) else None
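# Path sketch (illustrative cache layout): the regex pulls the directory name under
# "snapshots/" out of a resolved path such as
# ".../models--org--name/snapshots/<40-hex-hash>/config.json"; the hash is kept only
# if it matches REGEX_COMMIT_HASH, otherwise None is returned.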
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCamelCase__ : Optional[Any] = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
lowerCamelCase__ : List[Any] = os.path.join(hf_cache_home, "diffusers")
def __A ( a_ : Optional[str] = None , a_ : Optional[str] = None )-> None:
'''simple docstring'''
if new_cache_dir is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
SCREAMING_SNAKE_CASE : Optional[Any] = old_diffusers_cache
SCREAMING_SNAKE_CASE : Optional[Any] = Path(a_ ).expanduser()
SCREAMING_SNAKE_CASE : List[Any] = Path(a_ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
SCREAMING_SNAKE_CASE : Dict = new_cache_dir / old_blob_path.relative_to(a_ )
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_ )
os.replace(a_ , a_ )
try:
os.symlink(a_ , a_ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCamelCase__ : int = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
lowerCamelCase__ : Tuple = 0
else:
with open(cache_version_file) as f:
try:
lowerCamelCase__ : str = int(f.read())
except ValueError:
lowerCamelCase__ : Any = 0
if cache_version < 1:
lowerCamelCase__ : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
lowerCamelCase__ : List[Any] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"the directory exists and can be written to."
)
def __A ( a_ : str , a_ : Optional[str] = None )-> str:
'''simple docstring'''
if variant is not None:
SCREAMING_SNAKE_CASE : Optional[int] = weights_name.split('''.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
SCREAMING_SNAKE_CASE : List[str] = '''.'''.join(a_ )
return weights_name
def __A ( a_ : int , *,
a_ : List[str] , a_ : List[str] , a_ : Tuple , a_ : Optional[int] , a_ : int , a_ : int , a_ : Tuple , a_ : Union[str, Any] , a_ : Dict , a_ : Dict , a_ : int=None , )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = str(a_ )
if os.path.isfile(a_ ):
return pretrained_model_name_or_path
elif os.path.isdir(a_ ):
if os.path.isfile(os.path.join(a_ , a_ ) ):
# Load from a PyTorch checkpoint
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(a_ , a_ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(a_ , a_ , a_ ) ):
SCREAMING_SNAKE_CASE : List[str] = os.path.join(a_ , a_ , a_ )
return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(a_ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
a_ , filename=_add_variant(a_ , a_ ) , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
warnings.warn(
F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_ )}' so that the correct variant file can be added." , a_ , )
try:
# 2. Load model file as usual
SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 714 |
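# A minimal, standalone sketch of the commit-hash extraction implemented above:
# hub caches place files under .../snapshots/<commit_hash>/..., so a regex over the
# resolved path recovers the hash. REGEX_COMMIT_HASH is referenced but not defined
# in this snippet; the 40-hex-character pattern below is an assumption based on the
# shape of full git SHA-1 hashes.
import re
from pathlib import Path
from typing import Optional

_COMMIT_HASH_RE = re.compile(r"^[0-9a-f]{40}$")  # assumed equivalent of REGEX_COMMIT_HASH

def extract_commit_hash_sketch(resolved_file: str) -> Optional[str]:
    posix_path = str(Path(resolved_file).as_posix())  # normalize Windows separators
    match = re.search(r"snapshots/([^/]+)/", posix_path)
    if match is None:
        return None
    candidate = match.groups()[0]
    return candidate if _COMMIT_HASH_RE.match(candidate) else None

# Usage: a typical cached file path yields its snapshot hash.
print(extract_commit_hash_sketch(
    "~/.cache/huggingface/diffusers/models--org--repo/snapshots/" + "a" * 40 + "/config.json"
))  # prints the 40-character hash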
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 0 |
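# Sanity-check sketch for the jump-search routine above: it probes a sorted array in
# blocks of size ~sqrt(n), then scans linearly inside the block that may hold x, for
# O(sqrt(n)) comparisons overall. This copy with descriptive names mirrors the same
# logic; it assumes a non-empty, ascending list.
import math

def jump_search_sketch(arr, x):
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:  # jump ahead block by block
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:  # linear scan within the candidate block
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

assert jump_search_sketch([0, 1, 2, 8, 13, 21, 34, 55], 13) == 4
assert jump_search_sketch([0, 1, 2, 8, 13, 21, 34, 55], 7) == -1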
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 715 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 0 |
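# The metric class above is a thin wrapper around scikit-learn (its `fa_score` import
# corresponds to sklearn's real `f1_score`; digits are mangled throughout this dump).
# Computing F1 = 2 * precision * recall / (precision + recall) directly:
from sklearn.metrics import f1_score

references = [0, 1, 0, 1, 0]
predictions = [0, 0, 1, 1, 0]
print(f1_score(references, predictions))                         # 0.5 with pos_label=1
print(round(f1_score(references, predictions, pos_label=0), 2))  # 0.67 with pos_label=0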
"""simple docstring"""
def __A ( a_ : int , a_ : int )-> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = str(bin(a_ ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE : Optional[Any] = str(bin(a_ ) )[2:]
SCREAMING_SNAKE_CASE : int = max(len(a_ ) , len(a_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(a_ ) , b_binary.zfill(a_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
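# Cross-check sketch for the string-based OR above: for non-negative integers the
# result must agree with Python's built-in `|`, since an output digit is '1' exactly
# when either zero-padded binary digit is '1'.
def binary_or_sketch(a, b):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int("1" in pair)) for pair in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

for a, b in [(25, 32), (37, 50), (0, 0)]:
    assert binary_or_sketch(a, b) == bin(a | b), (a, b)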
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 18 | 0 |
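# Spot check for the digit-cancelling search above (Project Euler 33): 49/98 is
# "curious" because naively cancelling the shared 9 gives 4/8, which preserves the
# value. The product of the four non-trivial two-digit cases reduces to 1/100.
from fractions import Fraction

curious = ["16/64", "19/95", "26/65", "49/98"]
product = Fraction(1)
for frac in curious:
    product *= Fraction(frac)
assert product == Fraction(1, 100)  # denominator 100 is the puzzle's answer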
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
UpperCamelCase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCamelCase = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.task_name.lower()
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """train"""
UpperCamelCase = """dev"""
UpperCamelCase = """test"""
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :GlueDataTrainingArguments , lowerCamelCase_ :PreTrainedTokenizerBase , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Union[str, Split] = Split.train , lowerCamelCase_ :Optional[str] = None , ) -> int:
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = args
SCREAMING_SNAKE_CASE : Any = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE : int = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE : List[str] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
SCREAMING_SNAKE_CASE : Optional[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE : Dict = cached_features_file + '''.lock'''
with FileLock(lowerCamelCase_ ):
if os.path.exists(lowerCamelCase_ ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE : Dict = time.time()
SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowerCamelCase_ )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
SCREAMING_SNAKE_CASE : List[str] = self.processor.get_test_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
SCREAMING_SNAKE_CASE : Tuple = examples[:limit_length]
SCREAMING_SNAKE_CASE : Optional[int] = glue_convert_examples_to_features(
lowerCamelCase_ , lowerCamelCase_ , max_length=args.max_seq_length , label_list=lowerCamelCase_ , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE : List[Any] = time.time()
torch.save(self.features , lowerCamelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self :List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self :List[Any] , lowerCamelCase_ :Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.label_list
| 717 |
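# Minimal sketch of the cache-under-lock pattern the dataset class above relies on:
# the first process to take the lock materializes the cache file; concurrent
# processes block on the lock and then simply load it. The path and build function
# here are illustrative placeholders, not part of the original API.
import os
import pickle
from filelock import FileLock

def load_or_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as f:
                return pickle.load(f)
        features = build_fn()
        with open(cache_path, "wb") as f:
            pickle.dump(features, f)
        return features

# Usage: load_or_build("/tmp/cached_features.pkl", lambda: expensive_preprocess())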
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 18 | 0 |
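# The config above derives hidden_size as the channel count after the last stage:
# each patch-merging step between Swin stages doubles embed_dim, so with the
# defaults (embed_dim=96, depths=[2, 2, 6, 2]) the backbone ends at 768 channels.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert hidden_size == 768
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]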
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __A ( a_ : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : List[str] )-> Any:
'''simple docstring'''
with open(a_ ) as metadata_file:
SCREAMING_SNAKE_CASE : List[str] = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE : List[str] = torch.load(a_ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE : int = load_original_entity_vocab(a_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE : Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE : Tuple = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_ )
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(a_ )
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Any = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = '''MLukeTokenizer'''
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
with open(os.path.join(a_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = MLukeTokenizer.from_pretrained(a_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[bias_name]
SCREAMING_SNAKE_CASE : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE : int = state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE : str = state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE : str = LukeForMaskedLM(config=a_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[key]
else:
SCREAMING_SNAKE_CASE : str = state_dict[key]
SCREAMING_SNAKE_CASE : Optional[int] = model.load_state_dict(a_ , strict=a_ )
if set(a_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(a_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ , task='''entity_classification''' )
SCREAMING_SNAKE_CASE : List[Any] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE : int = (0, 9)
SCREAMING_SNAKE_CASE : int = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Any = model(**a_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE : List[Any] = (24, 30)
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = model(**a_ )
SCREAMING_SNAKE_CASE : Dict = encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE : str = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(a_ )
SCREAMING_SNAKE_CASE : Any = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(a_ ) )
model.save_pretrained(a_ )
def __A ( a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
SCREAMING_SNAKE_CASE : List[Any] = [json.loads(a_ ) for line in open(a_ )]
SCREAMING_SNAKE_CASE : str = {}
for entry in data:
SCREAMING_SNAKE_CASE : Optional[int] = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE : int = entity_id
break
SCREAMING_SNAKE_CASE : List[str] = F"{language}:{entity_name}"
SCREAMING_SNAKE_CASE : Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 718 |
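# Sketch of the embedding-resize trick the conversion script above uses repeatedly:
# rows for newly added special tokens are appended to an existing weight matrix,
# seeded from chosen existing rows. Shapes and seed rows here are illustrative.
import torch

word_emb = torch.randn(100, 768)       # pretend vocab of 100 tokens, hidden size 768
ent_row = word_emb[0].unsqueeze(0)     # initializer for the first new token
enta_row = word_emb[1].unsqueeze(0)    # initializer for the second new token
resized = torch.cat([word_emb, ent_row, enta_row])
assert resized.shape == (102, 768)
assert torch.equal(resized[100], word_emb[0])  # new row starts from the seed row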
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 18 | 0 |
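# The all-pairs routine above is Floyd-Warshall: dp[i][j] is relaxed through every
# intermediate vertex k, for O(n^3) time and O(n^2) space. A compact standalone
# check on a 3-node weighted digraph:
import math

def floyd_warshall_sketch(w):
    n = len(w)
    dp = [row[:] for row in w]  # start from the direct edge weights
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
    return dp

INF = math.inf
w = [[0, 5, INF], [INF, 0, 2], [1, INF, 0]]
assert floyd_warshall_sketch(w)[0][2] == 7  # shortest 0 -> 2 goes through 1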
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 719 |
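# The LayoutLMv3 tester above "legalizes" random bounding boxes by swapping
# coordinates so that x2 >= x1 and y2 >= y1 (boxes are [x1, y1, x2, y2]).
# An equivalent vectorized sketch of that loop:
import torch

bbox = torch.randint(0, 1000, (2, 4, 4))  # (batch, seq_len, 4) random boxes
xa = torch.minimum(bbox[..., 0], bbox[..., 2])
ya = torch.minimum(bbox[..., 1], bbox[..., 3])
xb = torch.maximum(bbox[..., 0], bbox[..., 2])
yb = torch.maximum(bbox[..., 1], bbox[..., 3])
legal = torch.stack([xa, ya, xb, yb], dim=-1)
assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()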
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
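# The init file above defers heavy imports through transformers' _LazyModule. A
# stdlib-only sketch of the same idea uses PEP 562's module-level __getattr__;
# the attribute-to-submodule mapping below is illustrative, not the real table.
import importlib

_LAZY_ATTRS = {"MCTCTConfig": ".configuration_mctct"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")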
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
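# A minimal, readably named sketch of the operators above; everything here is a
# standalone toy (names are prefixed with demo_ so they cannot clash with the
# obfuscated definitions above), and 0.4 stands in for MUTATION_PROBABILITY.
def demo_fitness(candidate: str, target: str) -> float:
    return sum(c == t for c, t in zip(candidate, target)) / len(target)

def demo_crossover(a: str, b: str) -> tuple:
    cut = random.randint(0, len(a) - 1)
    return a[:cut] + b[cut:], b[:cut] + a[cut:]

def demo_mutate(child: str, genes: list, p: float = 0.4) -> str:
    chars = list(child)
    if random.uniform(0, 1) < p:
        chars[random.randrange(len(chars))] = random.choice(genes)
    return "".join(chars)

if __name__ == "__main__":
    # One round on a toy target: crossover two parents, then mutate both children.
    demo_children = demo_crossover("hxllo", "heyyo")
    print([demo_mutate(child, list("abcdefghijklmnopqrstuvwxyz")) for child in demo_children])
    print(demo_fitness("hxllo", "hello"))  # 0.8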
| 720 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
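# A hedged usage sketch for the class above (the obfuscated `lowercase__` mirrors
# diffusers' MultiControlNetModel); the checkpoint names are illustrative
# assumptions and from_pretrained downloads weights, so this is a sketch, not a test.
def build_and_roundtrip(save_dir: str = "./multi_controlnet"):
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    multi = lowercase__([canny, pose])  # wrap both nets in the class above
    multi.save_pretrained(save_dir)     # writes save_dir, then save_dir_1, ...
    return lowercase__.from_pretrained(save_dir)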
| 18 | 0 |
"""simple docstring"""
from typing import Any
class lowercase__:
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = data
SCREAMING_SNAKE_CASE : Dict = None
class lowercase__:
'''simple docstring'''
def __init__( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = None
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.head
while temp is not None:
print(temp.data , end=''' ''' )
SCREAMING_SNAKE_CASE : Dict = temp.next
print()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = Node(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.head
SCREAMING_SNAKE_CASE : Optional[Any] = new_node
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ) -> Any:
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
SCREAMING_SNAKE_CASE : Dict = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE : Optional[Any] = node_a.next
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE : Optional[Any] = node_a.next
if node_a is None or node_a is None:
return
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = node_a.data, node_a.data
if __name__ == "__main__":
lowerCamelCase__ : Tuple = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
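# A readably named sketch of swap_nodes above: walk to the two matching nodes and
# exchange their .data payloads rather than relinking the nodes (assuming the
# conventional head/next attributes that the method bodies above reference).
def swap_by_data(linked_list, x, y):
    if x == y:
        return
    node_x = linked_list.head
    while node_x is not None and node_x.data != x:
        node_x = node_x.next
    node_y = linked_list.head
    while node_y is not None and node_y.data != y:
        node_y = node_y.next
    if node_x is None or node_y is None:
        return  # one of the values is absent; nothing to swap
    node_x.data, node_y.data = node_y.data, node_x.data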
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
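    # A tiny worked example: two equal-and-opposite forces applied at the same
    # point produce zero net moment, so the system is in static equilibrium
    # (plain variable names are used here for readability).
    balanced_forces = array([[10.0, 0.0], [-10.0, 0.0]])
    application_points = array([[0.0, 0.0], [0.0, 0.0]])
    assert in_static_equilibrium(balanced_forces, application_points)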
| 18 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
UpperCamelCase = LEDTokenizer
UpperCamelCase = LEDTokenizerFast
UpperCamelCase = True
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : str = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :Optional[int] ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , **lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :int ) -> Optional[int]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : str = tokenizer(lowerCamelCase_ , max_length=len(lowerCamelCase_ ) , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertNotIn('''labels''' , lowerCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : int = tokenizer(text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = tokenizer(text_target=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : str = inputs['''input_ids''']
SCREAMING_SNAKE_CASE : List[str] = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Any = ['''Summary of the text.''', '''Another summary.''']
SCREAMING_SNAKE_CASE : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [[0] * len(lowerCamelCase_ ) for x in encoded_output['''input_ids''']]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.pad(lowerCamelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE : str = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
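# A minimal standalone sketch of the -1 padding behaviour asserted in the
# global_attention_mask test above: shorter masks are right-padded with -1 (not 0),
# marking pure padding positions. This is a toy helper, not LED's actual API.
def pad_global_attention(masks, pad_value=-1):
    longest = max(len(mask) for mask in masks)
    return [mask + [pad_value] * (longest - len(mask)) for mask in masks]

assert pad_global_attention([[0, 0], [0, 0, 0]]) == [[0, 0, -1], [0, 0, 0]]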
| 700 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 0 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCamelCase__ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , *lowerCamelCase_ :Any , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples
SCREAMING_SNAKE_CASE : Any = post_process_function
SCREAMING_SNAKE_CASE : Dict = quant_trainer_args
SCREAMING_SNAKE_CASE : List[Any] = 1_28 # default number of calibration samples
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Dict=None ) -> Optional[Any]:
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE : Tuple = self._remove_unused_columns(lowerCamelCase_ , description='''Calibration''' )
return DataLoader(
lowerCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCamelCase_ , )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :int=None ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_calib_dataloader(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.model
quant_trainer.configure_model(lowerCamelCase_ , self.quant_trainer_args , calib=lowerCamelCase_ )
model.eval()
quant_trainer.enable_calibration(lowerCamelCase_ )
logger.info('''***** Running calibration *****''' )
logger.info(f" Num examples = {self.calib_num}" )
logger.info(f" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(lowerCamelCase_ ):
# Prediction step
SCREAMING_SNAKE_CASE : Dict = self.prediction_step(lowerCamelCase_ , lowerCamelCase_ , prediction_loss_only=lowerCamelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowerCamelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : Union[str, Any] = model
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Any=None , lowerCamelCase_ :str = "eval" ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Tuple = self.get_eval_dataloader(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : Dict = eval_loop(
lowerCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE : Tuple = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions )
SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE : List[Any] = metrics.pop(lowerCamelCase_ )
self.log(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ )
return metrics
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any=None , lowerCamelCase_ :str = "test" ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_test_dataloader(lowerCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : List[Any] = self.compute_metrics
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
lowerCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : int = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : int = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions , '''predict''' )
SCREAMING_SNAKE_CASE : int = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE : Optional[Any] = metrics.pop(lowerCamelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Any="./" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.eval_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_eval_dataloader(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(lowerCamelCase_ ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(v.to(lowerCamelCase_ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Any = self.model.to(lowerCamelCase_ )
model.eval()
model.float()
SCREAMING_SNAKE_CASE : Tuple = model.module if hasattr(lowerCamelCase_ , '''module''' ) else model
quant_trainer.configure_model(lowerCamelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(lowerCamelCase_ , '''model.onnx''' )
logger.info(f"exporting model to {output_model_file}" )
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , export_params=lowerCamelCase_ , opset_version=13 , do_constant_folding=lowerCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=lowerCamelCase_ , )
logger.info('''onnx export finished''' )
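# A minimal sketch of the calibration flow implemented by calibrate() above,
# abstracted away from pytorch-quantization: run a bounded number of no-grad
# forward passes so the quantizer observers can record activation ranges. The
# helper name and dict-style batches are illustrative assumptions.
def run_calibration_passes(model, dataloader, max_samples: int = 128):
    model.eval()
    seen = 0
    with torch.no_grad():
        for batch in dataloader:
            model(**batch)  # observers record activation ranges here
            seen += next(iter(batch.values())).shape[0]
            if seen >= max_samples:
                break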
| 701 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
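# A readably named sketch of the metric_max_over_ground_truths helper used above:
# score the prediction against every reference answer and keep the best value.
def metric_max(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

# e.g. with a toy exact-match metric:
assert metric_max(lambda p, g: float(p == g), "Paris", ["paris", "Paris"]) == 1.0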
| 18 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """trocr"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self :Tuple , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :List[Any]=16 , lowerCamelCase_ :int=40_96 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[Any]=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :List[Any]=0.0 , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :int=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[str]=2 , **lowerCamelCase_ :Any , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Tuple = d_model
SCREAMING_SNAKE_CASE : List[str] = decoder_layers
SCREAMING_SNAKE_CASE : Any = decoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Dict = init_std
SCREAMING_SNAKE_CASE : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : List[str] = scale_embedding
SCREAMING_SNAKE_CASE : str = use_learned_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
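# A brief usage sketch, assuming the obfuscated class above mirrors transformers'
# TrOCRConfig (and that transformers is installed): the attribute_map routes
# hidden_size to d_model and num_hidden_layers to decoder_layers, as checked below.
from transformers import TrOCRConfig

demo_config = TrOCRConfig(d_model=512, decoder_layers=6)
assert demo_config.hidden_size == 512
assert demo_config.num_hidden_layers == 6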
| 702 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
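# A minimal standalone sketch of the character-level scheme above: every character
# maps to a vocab id, with the [GO] token doubling as the unknown token (toy vocab
# for illustration only).
demo_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3}

def demo_char_tokenize(text: str) -> list:
    return [demo_vocab.get(char, demo_vocab["[GO]"]) for char in text]

assert demo_char_tokenize("ab?") == [2, 3, 0]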
| 18 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase__ : str = 250004
lowerCamelCase__ : Dict = 250020
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MBartaaTokenizer
UpperCamelCase = MBartaaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''<s>'''
SCREAMING_SNAKE_CASE : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
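        # fmt: off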
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : Optional[int] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE : Optional[int] = 1
return cls
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : Any = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = 10
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[int] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Any = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
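# Hedged usage sketch (added, not part of the test suite): how the tokenizer under
# test is typically driven for en_XX -> ro_RO translation. The checkpoint name is
# the one used in the tests above; the sample sentence and the guard are illustrative.
if __name__ == "__main__":
    from transformers import MBart50TokenizerFast

    _tok = MBart50TokenizerFast.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    _ids = _tok("UN Chief Says There Is No Military Solution in Syria").input_ids
    # input_ids begin with the source language code and end with eos (id 2),
    # matching the prefix_tokens / suffix_tokens assertions above.
    print(_ids[0], _ids[-1])  # 250004 (en_XX language code) ... 2 (</s>)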
| 703 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
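# Hedged sketch (added): constructing the equivalent upstream config. `LukeConfig`
# is the transformers class this file corresponds to; the values shown are the
# defaults from the signature above.
if __name__ == "__main__":
    from transformers import LukeConfig

    _cfg = LukeConfig(vocab_size=50_267, entity_vocab_size=500_000, entity_emb_size=256)
    print(_cfg.model_type, _cfg.hidden_size)  # luke 768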
| 18 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCamelCase_ )
return config
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1E-5
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE : List[str] = samplea.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE : List[str] = torch.arange(lowerCamelCase_ )[0:3, None].repeat(1 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE : List[str] = scheduler.batch_step_no_noise(lowerCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE : Dict = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[int] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
SCREAMING_SNAKE_CASE : List[str] = -1
else:
SCREAMING_SNAKE_CASE : int = timesteps[i + 1]
SCREAMING_SNAKE_CASE : List[Any] = scheduler.previous_timestep(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = prev_t.item()
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = [1_00, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
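# Hedged sketch (added): the plain step-by-step denoising loop these tests exercise,
# with a random tensor standing in for a real UNet's noise prediction. Shapes and
# step counts are illustrative only.
if __name__ == "__main__":
    _sched = DDPMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    _sched.set_timesteps(50)
    _sample = torch.randn(1, 3, 8, 8)
    for _t in _sched.timesteps:
        _noise_pred = torch.randn_like(_sample)  # stand-in for model(_sample, _t)
        _sample = _sched.step(_noise_pred, _t, _sample).prev_sample
    print(_sample.shape)  # torch.Size([1, 3, 8, 8])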
| 704 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
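    # Hedged usage example (added): the two-ended recursive lookup above in action.
    print(search([1, 3, 5, 7, 9], 7))  # -> 3
    print(search([1, 3, 5, 7, 9], 4))  # -> -1 (not found)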
| 18 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase__ : Optional[int] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
lowerCamelCase__ : Union[str, Any] = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
lowerCamelCase__ : Any = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
SCREAMING_SNAKE_CASE : Tuple = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : List[Any] = evaluate(dataset=lowerCamelCase_ , predictions=lowerCamelCase_ )
return score
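# Hedged usage sketch (added): computing the metric defined above, mirroring the
# docstring example. The id string is illustrative; `load_metric` needs network
# access (or a local cache) and an older `datasets` release that still ships it.
if __name__ == "__main__":
    _metric = datasets.load_metric("cuad")
    _preds = [{"prediction_text": ["The seller:"], "id": "example-id"}]
    _refs = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": "example-id"}]
    print(_metric.compute(predictions=_preds, references=_refs))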
| 705 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
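    # Hedged sanity check (added): expected output of the sieve above for a fixed input.
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]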
| 18 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = torch.device("cpu")
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
def __A ( a_ : Any )-> Tuple:
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def __A ( a_ : Any , a_ : Union[str, Any] , a_ : Any )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = dct.pop(a_ )
SCREAMING_SNAKE_CASE : Any = val
def __A ( a_ : List[str] )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE : Any = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE : Any = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
SCREAMING_SNAKE_CASE : Any = k_new.split('''.''' )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE : int = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
SCREAMING_SNAKE_CASE : Any = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __A ( a_ : str , a_ : Optional[int] , a_ : int )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE : Tuple = 10_00
SCREAMING_SNAKE_CASE : Tuple = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : int = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE : Optional[int] = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE : Optional[int] = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE : Dict = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE : Tuple = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE : Dict = [4, 3, 10, 5]
SCREAMING_SNAKE_CASE : str = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE : str = [4, 4, 12, 6]
SCREAMING_SNAKE_CASE : Optional[int] = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
SCREAMING_SNAKE_CASE : Any = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''' , check_hash=a_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.load(a_ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Any = checkpoint
SCREAMING_SNAKE_CASE : int = create_rename_keys(a_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a_ , a_ , a_ )
# load HuggingFace model
SCREAMING_SNAKE_CASE : Dict = SwiftFormerForImageClassification(a_ ).eval()
hf_model.load_state_dict(a_ )
# prepare test inputs
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
SCREAMING_SNAKE_CASE : List[str] = processor(images=a_ , return_tensors='''pt''' )
# compare outputs from both models
SCREAMING_SNAKE_CASE : List[str] = get_expected_output(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , a_ , atol=1E-3 )
Path(a_ ).mkdir(exist_ok=a_ )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
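    # Hedged CLI sketch (added): a typical invocation of this conversion script.
    # The script filename and checkpoint path are assumptions, not taken from this file.
    #
    #   python convert_swiftformer_original_to_hf.py \
    #       --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ \
    #       --original_ckpt ./swiftformer_xs.pth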
| 706 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
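# Hedged sketch (added): what the lazy-module pattern above buys. Importing the
# package is cheap; the first attribute access triggers the real submodule import.
# Assumes a standard transformers install where this file lives at models/funnel.
if __name__ == "__main__":
    from transformers.models import funnel as _funnel  # cheap, nothing heavy yet
    print(_funnel.FunnelConfig.model_type)  # triggers configuration_funnel import -> "funnel"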
| 18 | 0 |