code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1)
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__magic_name__ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    '''simple docstring'''
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    expression: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, expression)  # the function f
    diff_function = lambdify(x, diff(expression, x))  # its derivative f'
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
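# Worked example (a sketch; solve() is defined above): evaluating "5 6 9 * +"
# pushes 5, 6, 9; '*' pops 9 and 6 and pushes 54; '+' pops 54 and 5 and pushes 59.
assert solve("5 6 9 * +".split(" ")) == 59  # 5 + (6 * 9)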
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    '''simple docstring'''
    def __init__(self, args):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        '''simple docstring'''
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    '''simple docstring'''
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        '''simple docstring'''
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        '''simple docstring'''
        return len(self.data)

    def __getitem__(self, index):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
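# A minimal wiring sketch (an illustration; file names and the tokenizer choice
# are placeholders, the helpers are the ones defined above):
# from torch.utils.data import DataLoader
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)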
| 27 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
def setUp(self):
    '''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w") as fp:
    fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
    fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
    '''simple docstring'''
    input_text = "lower newer"
    output_text = "lower newer"
    return input_text, output_text

def test_full_tokenizer(self):
    '''simple docstring'''
    tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
    text = "lower"
    bpe_tokens = ["low", "er</w>"]
    tokens = tokenizer.tokenize(text)
    self.assertListEqual(tokens, bpe_tokens)
    input_tokens = tokens + ["<unk>"]
    input_bpe_tokens = [14, 15, 20]
    self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

@slow
def test_sequence_builders(self):
    '''simple docstring'''
    tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    text = tokenizer.encode("sequence builders", add_special_tokens=False)
    text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
    assert encoded_sentence == [0] + text + [1]
    assert encoded_pair == [0] + text + [1] + text_2 + [1]
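# How the toy merge table above drives BPE (an illustrative trace, not part of the test):
# "lower" is split into characters l o w e r</w>; the learned merges then fire greedily:
# "l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>", leaving ["low", "er</w>"],
# whose vocabulary ids are [14, 15] -- exactly what test_full_tokenizer asserts.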
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
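# Quick sanity check (a sketch; prime_sieve is defined above). Marking composites
# from start*start upward gives the classic O(n log log n) sieve complexity.
assert prime_sieve(10) == [2, 3, 5, 7]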
| 27 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
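# Minimal usage sketch (an assumption: the class above matches transformers'
# Blip2Processor; the checkpoint name and image path are placeholders):
# from PIL import Image
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")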
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
    config.max_workspace_size = 1 << 50
    if STRICT_TYPES:
        config.set_flag(trt.BuilderFlag.STRICT_TYPES)
    if args.fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    if args.int8:
        config.set_flag(trt.BuilderFlag.INT8)
    profile = builder.create_optimization_profile()
    config.add_optimization_profile(profile)
    for i in range(len(input_names)):
        profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
    engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z))

def cost_function(h, y):
    '''simple docstring'''
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

def log_likelihood(x, y, weights):
    '''simple docstring'''
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    '''simple docstring'''
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
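# Numerical sanity check of the analytic gradient np.dot(x.T, h - y) / y.size used in
# logistic_reg above (a sketch; sigmoid_function and cost_function are defined above):
_rng = np.random.default_rng(0)
_x = _rng.normal(size=(5, 2))
_y = _rng.integers(0, 2, size=5)
_w = _rng.normal(size=2)
_eps = 1e-6
_analytic = np.dot(_x.T, sigmoid_function(np.dot(_x, _w)) - _y) / _y.size
for _i in range(2):
    _wp, _wm = _w.copy(), _w.copy()
    _wp[_i] += _eps
    _wm[_i] -= _eps
    _num = (cost_function(sigmoid_function(np.dot(_x, _wp)), _y)
            - cost_function(sigmoid_function(np.dot(_x, _wm)), _y)) / (2 * _eps)
    assert abs(_num - _analytic[_i]) < 1e-4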
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
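# Descriptive note (not new API): with the lazy mapping above in place, a user-facing
# import such as `from transformers.models.convnext import ConvNextModel` only triggers
# the actual modeling_convnext import on first attribute access, while type checkers
# follow the TYPE_CHECKING branch instead.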
| 27 | 0 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)

def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'mid.attn_1.{weight_name}.weight' in k:
                print(f'Reshaping {k} for SD format')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict

def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
__magic_name__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
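# Example invocation of the conversion script above (a sketch; the script file name
# follows the diffusers repository convention and all paths are placeholders):
# python convert_diffusers_to_original_stable_diffusion.py \
#   --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors \
#   --half --use_safetensors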
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "owlvit_text_model"
    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "owlvit_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        '''simple docstring'''
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self):
        '''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
    @property
    def outputs(self):
        '''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None):
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self):
        '''simple docstring'''
        return 14
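# Minimal usage sketch (an assumption: these are transformers' OwlViT config classes):
# from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
# config = OwlViTConfig.from_text_vision_configs(OwlViTTextConfig(), OwlViTVisionConfig())
# print(config.projection_dim)  # 512 by default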
| 27 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label):
        '''simple docstring'''
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.')
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[Any] ,_a : Optional[Any] ,_a : Optional[int] = 4.0 ,_a : Optional[int] = None ,_a : int = 50 ,_a : List[str] = "pil" ,_a : Optional[Any] = True ,):
'''simple docstring'''
A_ : List[Any] = len(_a )
A_ : Any = self.transformer.config.sample_size
A_ : Union[str, Any] = self.transformer.config.in_channels
A_ : int = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) ,generator=_a ,device=self.device ,dtype=self.transformer.dtype ,)
A_ : Dict = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Tuple = torch.tensor(_a ,device=self.device ).reshape(-1 )
A_ : List[str] = torch.tensor([1000] * batch_size ,device=self.device )
A_ : Union[str, Any] = torch.cat([class_labels, class_null] ,0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : Optional[int] = latent_model_input[: len(_a ) // 2]
A_ : str = torch.cat([half, half] ,dim=0 )
A_ : Union[str, Any] = self.scheduler.scale_model_input(_a ,_a )
A_ : List[str] = t
if not torch.is_tensor(_a ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Union[str, Any] = latent_model_input.device.type == """mps"""
if isinstance(_a ,_a ):
A_ : Dict = torch.float32 if is_mps else torch.float64
else:
A_ : List[str] = torch.int32 if is_mps else torch.int64
A_ : Optional[Any] = torch.tensor([timesteps] ,dtype=_a ,device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : Tuple = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : str = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : str = self.transformer(
_a ,timestep=_a ,class_labels=_a ).sample
# perform guidance
if guidance_scale > 1:
A_ , A_ : Tuple = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_ , A_ : Union[str, Any] = torch.split(_a ,len(_a ) // 2 ,dim=0 )
A_ : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : Optional[int] = torch.cat([half_eps, half_eps] ,dim=0 )
A_ : int = torch.cat([eps, rest] ,dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_ , A_ : int = torch.split(_a ,_a ,dim=1 )
else:
A_ : Union[str, Any] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Dict = self.scheduler.step(_a ,_a ,_a ).prev_sample
if guidance_scale > 1:
A_ , A_ : Optional[int] = latent_model_input.chunk(2 ,dim=0 )
else:
A_ : Optional[Any] = latent_model_input
A_ : List[str] = 1 / self.vae.config.scaling_factor * latents
A_ : int = self.vae.decode(_a ).sample
A_ : Union[str, Any] = (samples / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : str = samples.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
A_ : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_a )
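# A hedged usage sketch for the pipeline above (known in diffusers as
# DiTPipeline); the checkpoint id is the public DiT-XL/2 256px release.
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # label names -> ImageNet ids
#   generator = torch.manual_seed(33)
#   image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]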
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
            A_ : Tuple = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
            A_ : Optional[int] = [np.asarray(speech ,dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(_a ,np.ndarray ):
            A_ : str = np.asarray(_a ,dtype=np.float64 )
        elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            A_ : Tuple = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
            A_ : Tuple = [np.asarray(feature ,dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
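# A hedged usage sketch; in transformers this class is ClapFeatureExtractor
# and the keyword names below follow that public API. The random waveform is
# a stand-in for real 48 kHz audio.
#
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#
#   extractor = ClapFeatureExtractor(sampling_rate=48000, truncation="fusion")
#   waveform = np.random.randn(48000 * 12)  # ~12 s, past the 10 s cap -> fusion path
#   features = extractor(waveform, sampling_rate=48000, return_tensors="np")
#   print(features["input_features"].shape, features["is_longer"])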
| 27 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = SwinConfig()
A_ : Optional[Any] = swin_name.split("""_""")
A_ : int = name_split[1]
A_ : Optional[Any] = int(name_split[4])
A_ : int = int(name_split[3][-1])
if model_size == "tiny":
A_ : str = 96
A_ : Any = (2, 2, 6, 2)
A_ : Tuple = (3, 6, 12, 24)
elif model_size == "small":
A_ : List[str] = 96
A_ : int = (2, 2, 18, 2)
A_ : str = (3, 6, 12, 24)
elif model_size == "base":
A_ : List[Any] = 128
A_ : Optional[int] = (2, 2, 18, 2)
A_ : str = (4, 8, 16, 32)
else:
A_ : Optional[Any] = 192
A_ : Any = (2, 2, 18, 2)
A_ : Dict = (6, 12, 24, 48)
if "in22k" in swin_name:
A_ : Any = 2_1841
else:
A_ : str = 1000
A_ : Optional[int] = """huggingface/label-files"""
A_ : Tuple = """imagenet-1k-id2label.json"""
A_ : List[Any] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""") , """r"""))
    A_ : str = {int(k ): v for k, v in id2label.items()}
    A_ : Any = id2label
    A_ : Tuple = {v: k for k, v in id2label.items()}
A_ : int = img_size
A_ : Dict = num_classes
A_ : Tuple = embed_dim
A_ : int = depths
A_ : List[str] = num_heads
A_ : Union[str, Any] = window_size
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
if "patch_embed.proj" in name:
A_ : int = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""")
if "patch_embed.norm" in name:
A_ : int = name.replace("""patch_embed.norm""" , """embeddings.norm""")
if "layers" in name:
A_ : List[str] = """encoder.""" + name
if "attn.proj" in name:
A_ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""")
if "attn" in name:
A_ : Optional[int] = name.replace("""attn""" , """attention.self""")
if "norm1" in name:
A_ : Union[str, Any] = name.replace("""norm1""" , """layernorm_before""")
if "norm2" in name:
A_ : List[str] = name.replace("""norm2""" , """layernorm_after""")
if "mlp.fc1" in name:
A_ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""")
if "mlp.fc2" in name:
A_ : Dict = name.replace("""mlp.fc2""" , """output.dense""")
if name == "norm.weight":
A_ : List[Any] = """layernorm.weight"""
if name == "norm.bias":
A_ : Optional[int] = """layernorm.bias"""
if "head" in name:
A_ : Tuple = name.replace("""head""" , """classifier""")
else:
A_ : Dict = """swin.""" + name
return name
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]):
for key in orig_state_dict.copy().keys():
A_ : Dict = orig_state_dict.pop(lowerCamelCase__)
if "mask" in key:
continue
elif "qkv" in key:
A_ : Dict = key.split(""".""")
A_ : List[str] = int(key_split[1])
A_ : List[Any] = int(key_split[3])
A_ : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A_ : Any = val[:dim, :]
A_ : List[Any] = val[
dim : dim * 2, :
]
A_ : List[str] = val[-dim:, :]
else:
A_ : str = val[
:dim
]
A_ : Optional[int] = val[
dim : dim * 2
]
A_ : List[str] = val[
-dim:
]
else:
A_ : Tuple = val
return orig_state_dict
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Dict):
A_ : Any = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__)
timm_model.eval()
A_ : Optional[int] = get_swin_config(lowerCamelCase__)
A_ : int = SwinForImageClassification(lowerCamelCase__)
model.eval()
A_ : Optional[Any] = convert_state_dict(timm_model.state_dict() , lowerCamelCase__)
model.load_state_dict(lowerCamelCase__)
A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""")))
A_ : str = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__).raw)
A_ : int = image_processor(images=lowerCamelCase__ , return_tensors="""pt""")
A_ : Union[str, Any] = timm_model(inputs["""pixel_values"""])
A_ : int = model(**lowerCamelCase__).logits
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3)
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase__)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase__)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__magic_name__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
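# Example invocation (the output path and script filename are hypothetical):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224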
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
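# Standalone sketch of the greedy-generation check above; "openai-gpt" is the
# same public checkpoint the test loads.
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   inputs = tokenizer("the president is", return_tensors="pt")
#   output_ids = model.generate(**inputs, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))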
| 27 | 0 |
'''simple docstring'''
import math
def lowerCamelCase ( lowerCamelCase : int):
    A_ : str = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
return exponent == int(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : float = 1 / 1_2345):
A_ : int = 0
A_ : Dict = 0
A_ : Tuple = 3
while True:
A_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCamelCase):
A_ : Union[str, Any] = int(lowerCamelCase)
total_partitions += 1
if check_partition_perfect(lowerCamelCase):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCamelCase)
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721 |
'''simple docstring'''
import base64
def lowerCamelCase ( lowerCamelCase : str):
    return base64.a85encode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
    return base64.a85decode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
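    # Round-trip sanity check in addition to the doctests; a85 is the Ascii85
    # codec from the stdlib base64 module used above.
    assert base64.a85decode(base64.a85encode(b"Hello World!")).decode("utf-8") == "Hello World!"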
| 27 | 0 |
'''simple docstring'''
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowerCamelCase ( lowerCamelCase : Optional[int]=None):
if subparsers is not None:
A_ : Tuple = subparsers.add_parser("""env""")
else:
A_ : Dict = argparse.ArgumentParser("""Accelerate env command""")
parser.add_argument(
"""--config_file""" , default=lowerCamelCase , help="""The config file to use for the default values in the launching script.""")
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase)
return parser
def lowerCamelCase ( lowerCamelCase : int):
A_ : Optional[int] = torch.__version__
A_ : Optional[int] = torch.cuda.is_available()
A_ : List[str] = is_xpu_available()
A_ : Union[str, Any] = is_npu_available()
A_ : Optional[Any] = """Not found"""
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
A_ : Dict = load_config_from_file(args.config_file).to_dict()
A_ : Tuple = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""PyTorch XPU available""": str(lowerCamelCase),
"""PyTorch NPU available""": str(lowerCamelCase),
"""System RAM""": F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
A_ : Tuple = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""")
print("""\n""".join([F'- {prop}: {val}' for prop, val in info.items()]))
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
A_ : List[Any] = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
if isinstance(lowerCamelCase , lowerCamelCase)
else F'\t{accelerate_config}'
)
print(lowerCamelCase)
A_ : Optional[Any] = accelerate_config
return info
def lowerCamelCase ( ):
A_ : Dict = env_command_parser()
A_ : Dict = parser.parse_args()
env_command(lowerCamelCase)
return 0
if __name__ == "__main__":
raise SystemExit(main())
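# Typical invocations (the config path shown is hypothetical):
#   accelerate env
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml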
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
        A_ : str = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
    A_ : Any = max([len(w ) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
        A_ : int = [get_chinese_word(r ) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
            A_ : List[Any] = bert_tokenizer._convert_id_to_token(id )
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
        A_ : Optional[Any] = [json.dumps(ref ) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
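# Example run using the defaults wired into the parser above (assuming the
# script is saved as prepare_chinese_ref.py and the ./resources files exist):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt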
| 27 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Tuple ,_a : bool = True ,_a : int = 32 ,_a : List[Any]=PILImageResampling.BILINEAR ,_a : bool = True ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize
A_ : List[Any] = do_rescale
A_ : int = size_divisor
A_ : Optional[Any] = resample
super().__init__(**_a )
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : int ,_a : Any ,_a : Optional[ChannelDimension] = None ,**_a : List[str] ):
'''simple docstring'''
A_ : List[Any] = get_image_size(_a )
# Rounds the height and width down to the closest multiple of size_divisor
A_ : str = height // size_divisor * size_divisor
A_ : Optional[Any] = width // size_divisor * size_divisor
A_ : List[str] = resize(_a ,(new_h, new_w) ,resample=_a ,data_format=_a ,**_a )
return image
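    # Worked example of the rounding above: with size_divisor=32, a 97 x 131
    # input becomes 96 x 128, since 97 // 32 * 32 = 96 and 131 // 32 * 32 = 128.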
def _a ( self : List[Any] ,_a : np.ndarray ,_a : float ,_a : Optional[ChannelDimension] = None ,**_a : int ):
'''simple docstring'''
return rescale(image=_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Union[str, Any] ,_a : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,_a : Optional[bool] = None ,_a : Optional[int] = None ,_a : Optional[Any]=None ,_a : Optional[bool] = None ,_a : Optional[Union[TensorType, str]] = None ,_a : ChannelDimension = ChannelDimension.FIRST ,**_a : List[str] ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor
A_ : Union[str, Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
A_ : Optional[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
A_ : Optional[Any] = [to_numpy_array(_a ) for img in images]
if do_resize:
A_ : Any = [self.resize(_a ,size_divisor=_a ,resample=_a ) for image in images]
if do_rescale:
A_ : Dict = [self.rescale(_a ,scale=1 / 255 ) for image in images]
A_ : Optional[Any] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : Tuple = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
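# A hedged usage sketch; in transformers this processor is ViltProcessor and
# the checkpoint id below is the public VQA-finetuned ViLT release.
#
#   from transformers import ViltProcessor
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")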
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Union[str, Any]):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
A_ : List[Any] = (boundary[1] - boundary[0]) / steps
A_ : Optional[Any] = boundary[0]
A_ : List[Any] = boundary[1]
A_ : Optional[int] = make_points(lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : Union[str, Any] = 0.0
y += (h / 2.0) * f(lowerCamelCase)
for i in x_i:
# print(i)
y += h * f(lowerCamelCase)
y += (h / 2.0) * f(lowerCamelCase)
return y
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict):
A_ : List[Any] = a + h
while x < (b - h):
yield x
A_ : str = x + h
def lowerCamelCase ( lowerCamelCase : Union[str, Any]): # enter your function here
A_ : Tuple = (x - 0) * (x - 0)
return y
def lowerCamelCase ( ):
A_ : int = 0.0 # Lower bound of integration
A_ : Optional[Any] = 1.0 # Upper bound of integration
A_ : Optional[Any] = 10.0 # define number of steps or resolution
A_ : Optional[Any] = [a, b] # define boundary of integration
A_ : Tuple = method_a(lowerCamelCase , lowerCamelCase)
print(F'y = {y}')
if __name__ == "__main__":
main()
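# Worked check for main() above: f(x) = x**2 on [0, 1] with 10 steps gives
# 0.1 * (0 / 2 + 0.01 + 0.04 + ... + 0.81 + 1 / 2) = 0.335, close to the
# exact integral 1/3.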
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" ,from_pt=_a ,dtype=jnp.bfloataa )
A_ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,controlnet=_a ,from_pt=_a ,dtype=jnp.bfloataa )
A_ : List[str] = controlnet_params
A_ : Any = """bird"""
A_ : str = jax.device_count()
A_ : Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
A_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
A_ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
A_ : Optional[Any] = jax.random.PRNGKey(0 )
A_ : Union[str, Any] = jax.random.split(_a ,jax.device_count() )
A_ : int = replicate(_a )
A_ : List[str] = shard(_a )
A_ : str = shard(_a )
A_ : Optional[Any] = pipe(
prompt_ids=_a ,image=_a ,params=_a ,prng_seed=_a ,num_inference_steps=50 ,jit=_a ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
A_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Union[str, Any] = images[0, 253:256, 253:256, -1]
A_ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : Optional[int] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" ,from_pt=_a ,dtype=jnp.bfloataa )
A_ : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,controlnet=_a ,from_pt=_a ,dtype=jnp.bfloataa )
A_ : Tuple = controlnet_params
A_ : int = """Chef in the kitchen"""
A_ : Any = jax.device_count()
A_ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
A_ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
A_ : Optional[Any] = jax.random.PRNGKey(0 )
A_ : Dict = jax.random.split(_a ,jax.device_count() )
A_ : Any = replicate(_a )
A_ : Union[str, Any] = shard(_a )
A_ : int = shard(_a )
A_ : str = pipe(
prompt_ids=_a ,image=_a ,params=_a ,prng_seed=_a ,num_inference_steps=50 ,jit=_a ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
A_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Union[str, Any] = images[0, 253:256, 253:256, -1]
A_ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : Optional[Any] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
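# These are slow integration tests; with a GPU/TPU-enabled JAX install they
# run via the usual slow-test switch (the test path is hypothetical):
#   RUN_SLOW=1 pytest tests/pipelines/controlnet/test_flax_controlnet.py -s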
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        A_ : List[str] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,RagTokenizer ) else self.tokenizer
        )
        A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,RagTokenizer ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
        return [len(x ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        A_ : Union[str, Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        A_ : str = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer )
            else self.tokenizer.pad_token_id
        )
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
    def remove_articles(text : Any):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text)
    def white_space_fix(text : List[Any]):
        return " ".join(text.split())
    def remove_punc(text : Union[str, Any]):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text : List[str]):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
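# Worked example for the token-level F1 above: "a cat sat" vs "the cat sat
# down" normalizes to ["cat", "sat"] vs ["cat", "sat", "down"], so num_same
# is 2, precision = 2/2, recall = 2/3 and F1 = 0.8.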
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
import math
def lowerCamelCase ( lowerCamelCase : int):
A_ : Dict = 0
A_ : str = 0
while num > 0:
A_ : List[str] = num % 8
A_ : Any = octal + (remainder * math.floor(math.pow(10 , lowerCamelCase)))
counter += 1
A_ : Dict = math.floor(num / 8) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
    return F'0o{int(octal)}'
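# Trace for decimal 65: 65 % 8 = 1, then 8 % 8 = 0, then 1 % 8 = 1, giving
# digits 1, 0, 1 from least to most significant, i.e. "0o101".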
def lowerCamelCase ( ):
print("""\n2 in octal is:""")
print(decimal_to_octal(2)) # = 2
print("""\n8 in octal is:""")
print(decimal_to_octal(8)) # = 10
print("""\n65 in octal is:""")
print(decimal_to_octal(65)) # = 101
print("""\n216 in octal is:""")
print(decimal_to_octal(216)) # = 330
print("""\n512 in octal is:""")
print(decimal_to_octal(512)) # = 1000
print("""\n""")
if __name__ == "__main__":
main()
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
set_seed(770)
__magic_name__ = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
__magic_name__ = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
__magic_name__ = os.path.dirname(os.path.abspath(__file__))
__magic_name__ = os.path.join(os.path.expanduser('~'), '.cache')
__magic_name__ = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : str=False):
A_ : Union[str, Any] = model_type
if use_small:
key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["""file_name"""])
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple):
    os.makedirs(CACHE_DIR , exist_ok=True)
hf_hub_download(repo_id=lowerCamelCase , filename=lowerCamelCase , local_dir=lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=False , lowerCamelCase : Tuple="text"):
if model_type == "text":
A_ : Dict = BarkSemanticModel
A_ : Dict = BarkSemanticConfig
A_ : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
A_ : str = BarkCoarseModel
A_ : Any = BarkCoarseConfig
A_ : Union[str, Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
A_ : int = BarkFineModel
A_ : Optional[Any] = BarkFineConfig
A_ : List[str] = BarkFineGenerationConfig
else:
raise NotImplementedError()
A_ : Any = F'{model_type}_small' if use_small else model_type
A_ : Optional[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCamelCase):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.')
_download(model_info["""repo_id"""] , model_info["""file_name"""])
A_ : int = torch.load(lowerCamelCase , map_location=lowerCamelCase)
# this is a hack
A_ : Any = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
A_ : List[str] = model_args["""vocab_size"""]
A_ : List[Any] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
A_ : str = model_args.pop("""n_head""")
A_ : List[str] = model_args.pop("""n_embd""")
A_ : List[str] = model_args.pop("""n_layer""")
A_ : Optional[int] = ConfigClass(**checkpoint["""model_args"""])
A_ : List[str] = ModelClass(config=lowerCamelCase)
A_ : int = GenerationConfigClass()
A_ : List[Any] = model_generation_config
A_ : Any = checkpoint["""model"""]
# fixup checkpoint
A_ : List[str] = """_orig_mod."""
for k, v in list(state_dict.items()):
if k.startswith(lowerCamelCase):
# replace part of the key with corresponding layer name in HF implementation
A_ : Union[str, Any] = k[len(lowerCamelCase) :]
for old_layer_name in new_layer_name_dict:
A_ : List[str] = new_k.replace(lowerCamelCase , new_layer_name_dict[old_layer_name])
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase)
A_ : Any = set(state_dict.keys()) - set(model.state_dict().keys())
A_ : Dict = {k for k in extra_keys if not k.endswith(""".attn.bias""")}
A_ : str = set(model.state_dict().keys()) - set(state_dict.keys())
A_ : Optional[Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""")}
if len(lowerCamelCase) != 0:
raise ValueError(F'extra keys found: {extra_keys}')
if len(lowerCamelCase) != 0:
raise ValueError(F'missing keys: {missing_keys}')
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
A_ : int = model.num_parameters(exclude_embeddings=lowerCamelCase)
A_ : Optional[Any] = checkpoint["""best_val_loss"""].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1)}M params, {round(lowerCamelCase , 3)} loss')
model.eval()
model.to(lowerCamelCase)
del checkpoint, state_dict
return model
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Dict=False , lowerCamelCase : str="text"):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
A_ : Tuple = """cpu""" # do conversion on cpu
A_ : str = _get_ckpt_path(lowerCamelCase , use_small=lowerCamelCase)
A_ : Optional[Any] = _load_model(lowerCamelCase , lowerCamelCase , model_type=lowerCamelCase , use_small=lowerCamelCase)
# load bark initial model
A_ : Optional[Any] = _bark_load_model(lowerCamelCase , """cpu""" , model_type=lowerCamelCase , use_small=lowerCamelCase)
if model_type == "text":
A_ : Optional[int] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCamelCase) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""")
# check that the converted model produces the same output as the original Bark model
A_ : Dict = 5
A_ : Dict = 10
if model_type in ["text", "coarse"]:
A_ : Optional[int] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int)
A_ : Optional[int] = bark_model(lowerCamelCase)[0]
A_ : str = model(lowerCamelCase)
# take last logits
A_ : List[str] = output_new_model_total.logits[:, [-1], :]
else:
A_ : Tuple = 3
A_ : Tuple = 8
A_ : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int)
A_ : int = model(lowerCamelCase , lowerCamelCase)
A_ : Any = bark_model(lowerCamelCase , lowerCamelCase)
A_ : Dict = output_new_model_total.logits
# any residual output difference should come from the differing self-attention implementation designs
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""")
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""")
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Dict , ):
A_ : List[str] = os.path.join(lowerCamelCase , lowerCamelCase)
A_ : Optional[int] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json"""))
A_ : int = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json"""))
A_ : Any = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json"""))
A_ : Optional[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""")
A_ : Any = BarkSemanticModel.from_pretrained(lowerCamelCase)
A_ : Optional[Any] = BarkCoarseModel.from_pretrained(lowerCamelCase)
A_ : Dict = BarkFineModel.from_pretrained(lowerCamelCase)
A_ : str = EncodecModel.from_pretrained("""facebook/encodec_24khz""")
A_ : Any = BarkConfig.from_sub_model_configs(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config)
A_ : str = BarkModel(lowerCamelCase)
A_ : int = semantic
A_ : Tuple = coarseAcoustic
A_ : Any = fineAcoustic
A_ : Tuple = codec
A_ : List[Any] = bark_generation_config
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
bark.save_pretrained(lowerCamelCase , repo_id=lowerCamelCase , push_to_hub=lowerCamelCase)
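# Example invocation (hypothetical output path), mirroring the CLI entry point below, which
# converts one sub-model at a time:
# >>> load_model("bark_text_small", model_type="text", use_small=True)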
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
__magic_name__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
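# Comparing a fixed 3x3x3 corner slice against hard-coded reference values keeps this fast
# test deterministic while still catching numerical regressions in the denoising loop.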
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
__magic_name__ = '#'
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
A_ : dict = {}
def _a ( self : List[str] ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = self._trie
for char in text:
if char not in trie:
A_ : Dict = {}
A_ : List[Any] = trie[char]
A_ : Optional[Any] = True
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
A_ : Union[str, Any] = self._trie
for char in prefix:
if char in trie:
A_ : Optional[Any] = trie[char]
else:
return []
return self._elements(_a )
def _a ( self : Dict ,_a : dict ):
'''simple docstring'''
A_ : Optional[Any] = []
for c, v in d.items():
A_ : List[str] = [""" """] if c == END else [(c + s) for s in self._elements(_a )]
result.extend(_a )
return tuple(_a )
__magic_name__ = Trie()
__magic_name__ = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def lowerCamelCase ( lowerCamelCase : str):
A_ : str = trie.find_word(lowerCamelCase)
return tuple(string + word for word in suffixes)
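# Illustrative sketch (given the words inserted above): autocomplete_using_trie("de") would
# yield ("depart ", "detergent ", "deer ", "deal ") -- each suffix joined back onto the "de"
# prefix, with the END marker rendered as a trailing space by _elements.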
def lowerCamelCase ( ):
print(autocomplete_using_trie("""de"""))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
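# A minimal usage sketch (hypothetical, assuming the standard transformers ONNX export flow):
# >>> config = DebertaVaConfig.from_pretrained("microsoft/deberta-v2-xlarge")
# >>> onnx_config = DebertaVaOnnxConfig(config)
# >>> dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)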
| 27 | 0 |
'''simple docstring'''
import os
# All paths are set assuming you run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__magic_name__ = '.'
if __name__ == "__main__":
__magic_name__ = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
__magic_name__ = []
__magic_name__ = []
with open(doctest_file_path) as fp:
for line in fp:
__magic_name__ = line.strip()
__magic_name__ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__magic_name__ = '\n'.join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 0 |
'''simple docstring'''
from math import factorial
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : List[str] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = real
if isinstance(_a ,_a ):
A_ : str = [1] * rank
else:
A_ : Dict = rank
def __repr__( self : Tuple ):
'''simple docstring'''
return (
f'{self.real}+'
f'{"+".join(str(_a )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real ,_a )
def __add__( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
if not isinstance(_a ,_a ):
return Dual(self.real + other ,self.duals )
A_ : Dict = self.duals.copy()
A_ : Dict = other.duals.copy()
if len(_a ) > len(_a ):
o_dual.extend([1] * (len(_a ) - len(_a )) )
elif len(_a ) < len(_a ):
s_dual.extend([1] * (len(_a ) - len(_a )) )
A_ : str = []
for i in range(len(_a ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real ,_a )
a_ = __add__
def __sub__( self : Optional[int] ,_a : Tuple ):
'''simple docstring'''
return self + other * -1
def __mul__( self : List[Any] ,_a : Tuple ):
'''simple docstring'''
if not isinstance(_a ,_a ):
A_ : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other ,_a )
A_ : int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real ,_a )
a_ = __mul__
def __truediv__( self : Dict ,_a : Dict ):
'''simple docstring'''
if not isinstance(_a ,_a ):
A_ : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other ,_a )
raise ValueError
def __floordiv__( self : Any ,_a : Tuple ):
'''simple docstring'''
if not isinstance(_a ,_a ):
A_ : Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other ,_a )
raise ValueError
def __pow__( self : str ,_a : Union[str, Any] ):
'''simple docstring'''
if n < 0 or isinstance(_a ,_a ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
A_ : int = self
for _ in range(n - 1 ):
x *= self
return x
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : int):
if not callable(lowerCamelCase):
raise ValueError("""differentiate() requires a function as input for func""")
if not isinstance(lowerCamelCase , (float, int)):
raise ValueError("""differentiate() requires a float as input for position""")
if not isinstance(lowerCamelCase , lowerCamelCase):
raise ValueError("""differentiate() requires an int as input for order""")
A_ : Tuple = Dual(lowerCamelCase , 1)
A_ : int = func(lowerCamelCase)
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCamelCase)
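# Worked example of the forward-mode trick above: to differentiate g(x) = x**2 at x = 3,
# differentiate(g, 3, 1) seeds Dual(3, 1); squaring gives Dual(9, [6, 1, 0]), so the first
# dual coefficient times 1! recovers g'(3) = 6.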
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase ( lowerCamelCase : List[Any]):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
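# A minimal usage sketch (assuming the checkpoint mapped above):
# >>> configuration = NezhaConfig.from_pretrained("sijunhe/nezha-cn-base")
# >>> configuration.hidden_size  # 768 by default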
| 27 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__magic_name__ = logging.get_logger(__name__)
# General docstring
__magic_name__ = 'PoolFormerConfig'
# Base docstring
__magic_name__ = 'sail/poolformer_s12'
__magic_name__ = [1, 512, 7, 7]
# Image classification docstring
__magic_name__ = 'sail/poolformer_s12'
__magic_name__ = 'tabby, tabby cat'
__magic_name__ = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : float = 0.0 , lowerCamelCase : bool = False):
if drop_prob == 0.0 or not training:
return input
A_ : int = 1 - drop_prob
A_ : str = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
A_ : int = keep_prob + torch.rand(lowerCamelCase , dtype=input.dtype , device=input.device)
random_tensor.floor_() # binarize
A_ : List[Any] = input.div(lowerCamelCase) * random_tensor
return output
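# Illustrative sketch of the helper above: with drop_prob = 0.2 and an input of shape
# (4, 197, 768), a (4, 1, 1) Bernoulli-style mask keeps each sample's residual branch with
# probability 0.8 and rescales survivors by 1 / 0.8, so the expected activation is unchanged.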
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : Optional[float] = None ):
'''simple docstring'''
super().__init__()
A_ : str = drop_prob
def _a ( self : Union[str, Any] ,_a : torch.Tensor ):
'''simple docstring'''
return drop_path(_a ,self.drop_prob ,self.training )
def _a ( self : Dict ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : Dict ,_a : Optional[int] ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : str=None ):
'''simple docstring'''
super().__init__()
A_ : str = patch_size if isinstance(_a ,collections.abc.Iterable ) else (patch_size, patch_size)
A_ : Dict = stride if isinstance(_a ,collections.abc.Iterable ) else (stride, stride)
A_ : Any = padding if isinstance(_a ,collections.abc.Iterable ) else (padding, padding)
A_ : List[Any] = nn.Convad(_a ,_a ,kernel_size=_a ,stride=_a ,padding=_a )
A_ : Dict = norm_layer(_a ) if norm_layer else nn.Identity()
def _a ( self : List[str] ,_a : int ):
'''simple docstring'''
A_ : str = self.projection(_a )
A_ : Tuple = self.norm(_a )
return embeddings
class __lowerCAmelCase ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : Optional[int] ,**_a : Dict ):
'''simple docstring'''
super().__init__(1 ,_a ,**_a )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : int ):
'''simple docstring'''
super().__init__()
A_ : Tuple = nn.AvgPoolad(_a ,stride=1 ,padding=pool_size // 2 ,count_include_pad=_a )
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
return self.pool(_a ) - hidden_states
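# The subtraction above turns average pooling into a pure token mixer: the identity component
# is removed here and restored by the residual connection in the PoolFormer layer, mirroring
# how attention-based blocks are wired, with pooling as the mixing operation.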
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : List[str] ,_a : Any ,_a : int ,_a : int ):
'''simple docstring'''
super().__init__()
A_ : Dict = nn.Convad(_a ,_a ,1 )
A_ : Union[str, Any] = nn.Convad(_a ,_a ,1 )
A_ : Optional[Any] = PoolFormerDropPath(_a )
if isinstance(config.hidden_act ,_a ):
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
else:
A_ : Dict = config.hidden_act
def _a ( self : str ,_a : Tuple ):
'''simple docstring'''
A_ : Any = self.conva(_a )
A_ : str = self.act_fn(_a )
A_ : Optional[int] = self.drop(_a )
A_ : List[str] = self.conva(_a )
A_ : List[str] = self.drop(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Any ,_a : Tuple ,_a : List[Any] ,_a : Tuple ,_a : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
super().__init__()
A_ : List[Any] = PoolFormerPooling(_a )
A_ : str = PoolFormerOutput(_a ,_a ,_a ,_a )
A_ : List[Any] = PoolFormerGroupNorm(_a )
A_ : Optional[Any] = PoolFormerGroupNorm(_a )
# Stochastic depth ("drop path"): randomly skips the residual branch during training
A_ : Optional[int] = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
A_ : Any = config.use_layer_scale
if config.use_layer_scale:
A_ : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) ,requires_grad=_a )
A_ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) ,requires_grad=_a )
def _a ( self : Optional[int] ,_a : Any ):
'''simple docstring'''
if self.use_layer_scale:
A_ : Optional[Any] = self.pooling(self.before_norm(_a ) )
A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
A_ : Optional[int] = hidden_states + self.drop_path(_a )
A_ : Optional[Any] = ()
A_ : List[Any] = self.output(self.after_norm(_a ) )
A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
A_ : Union[str, Any] = hidden_states + self.drop_path(_a )
A_ : Dict = (output,) + outputs
return outputs
else:
A_ : Union[str, Any] = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
A_ : Any = pooling_output + hidden_states
A_ : List[str] = ()
# Second residual connection inside the PoolFormerOutput block
A_ : List[Any] = self.drop_path(self.output(self.after_norm(_a ) ) )
A_ : str = hidden_states + layer_output
A_ : Optional[Any] = (output,) + outputs
return outputs
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
super().__init__()
A_ : Any = config
# stochastic depth decay rule
A_ : Dict = [x.item() for x in torch.linspace(0 ,config.drop_path_rate ,sum(config.depths ) )]
# patch embeddings
A_ : Tuple = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] ,stride=config.strides[i] ,padding=config.padding[i] ,num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] ,hidden_size=config.hidden_sizes[i] ,) )
A_ : Optional[Any] = nn.ModuleList(_a )
# Transformer blocks
A_ : Tuple = []
A_ : int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
A_ : str = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a ,num_channels=config.hidden_sizes[i] ,pool_size=config.pool_size ,hidden_size=config.hidden_sizes[i] ,intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) ,drop_path=dpr[cur + j] ,) )
blocks.append(nn.ModuleList(_a ) )
A_ : List[Any] = nn.ModuleList(_a )
def _a ( self : Tuple ,_a : List[str] ,_a : List[Any]=False ,_a : Dict=True ):
'''simple docstring'''
A_ : List[Any] = () if output_hidden_states else None
A_ : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings ,self.block ) ):
A_ : List[str] = layers
# Get patch embeddings from hidden_states
A_ : Any = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
A_ : Tuple = blk(_a )
A_ : List[str] = layer_outputs[0]
if output_hidden_states:
A_ : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a ,hidden_states=_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = PoolFormerConfig
a_ = """poolformer"""
a_ = """pixel_values"""
a_ = True
def _a ( self : int ,_a : List[str] ):
'''simple docstring'''
if isinstance(_a ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a ,nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _a ( self : Any ,_a : Optional[int] ,_a : List[str]=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : int = value
__magic_name__ = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__magic_name__ = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ,_a : str ):
'''simple docstring'''
super().__init__(_a )
A_ : List[str] = config
A_ : Optional[Any] = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _a ( self : List[str] ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
A_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A_ : List[Any] = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a ,)
A_ : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a ,hidden_states=encoder_outputs.hidden_states ,)
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_a : str ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = nn.Linear(config.hidden_size ,config.hidden_size )
def _a ( self : str ,_a : Any ):
'''simple docstring'''
A_ : Optional[Any] = self.dense(_a )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : List[str] ):
'''simple docstring'''
super().__init__(_a )
A_ : List[str] = config.num_labels
A_ : str = PoolFormerModel(_a )
# Final norm
A_ : Union[str, Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
A_ : Optional[Any] = (
nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _a ( self : str ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[torch.LongTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.poolformer(
_a ,output_hidden_states=_a ,return_dict=_a ,)
A_ : Optional[Any] = outputs[0]
A_ : Optional[int] = self.classifier(self.norm(_a ).mean([-2, -1] ) )
A_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Optional[Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : List[Any] = """single_label_classification"""
else:
A_ : int = """multi_label_classification"""
if self.config.problem_type == "regression":
A_ : Optional[Any] = MSELoss()
if self.num_labels == 1:
A_ : Optional[int] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A_ : List[Any] = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
A_ : List[str] = CrossEntropyLoss()
A_ : List[str] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Dict = BCEWithLogitsLoss()
A_ : List[str] = loss_fct(_a ,_a )
if not return_dict:
A_ : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
| 709 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
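# Illustrative trace on the graph below: starting from "A", the stack holds ["D", "C", "B"]
# after the first pop (neighbors are pushed in reverse), so "B" is explored next, and the
# search ultimately returns all reachable vertices {"A", "B", "C", "D", "E", "F", "G"}.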
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
__magic_name__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[str] = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
A_ : Stack[int] = Stack()
A_ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCamelCase))
elif i in operators:
# RULE 2
operator_stack.push(lowerCamelCase)
elif i == ")":
# RULE 4
A_ : List[str] = operator_stack.peek()
operator_stack.pop()
A_ : List[Any] = operand_stack.peek()
operand_stack.pop()
A_ : List[Any] = operand_stack.peek()
operand_stack.pop()
A_ : Any = operators[opr](lowerCamelCase , lowerCamelCase)
operand_stack.push(lowerCamelCase)
# RULE 5
return operand_stack.peek()
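# Worked trace for "(5 + ((4 * 2) * (2 + 3)))": the first ")" reduces 4 * 2 = 8, the second
# reduces 2 + 3 = 5, the third reduces 8 * 5 = 40, and the outermost reduces 5 + 40 = 45,
# which is the value left on top of the operand stack.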
if __name__ == "__main__":
__magic_name__ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
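# Illustrative example of the rules above (hypothetical key): "conv_1.block.conv.weight"
# becomes "mobilevit.conv_stem.convolution.weight" -- "conv_1." -> "conv_stem.", ".block."
# is dropped, ".conv." -> ".convolution.", and the "mobilevit." prefix is added last.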
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
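# Sketch of the trimming above: for pad_token_id = 0 and input_ids [[5, 6, 0], [7, 0, 0]],
# only the first two columns contain a non-pad token somewhere, so the batch is trimmed to
# [[5, 6], [7, 0]] (and the attention mask is sliced the same way when provided).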
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
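# Example of the normalization above: "The Quick, Brown Fox!" -> "quick brown fox"
# (lowercased, punctuation stripped, the article "the" removed, whitespace collapsed).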
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
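# Worked example for the token-level F1 above: prediction "the cat sat" vs. gold "a cat sat
# down" normalizes to ["cat", "sat"] and ["cat", "sat", "down"], giving precision 2/2,
# recall 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.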
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
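# Minimal usage sketch (commented out). Assumptions: Pillow is installed,
# "example.jpg" is a placeholder path, and the usual BaseImageProcessor
# __call__ -> preprocess dispatch applies to this class.
# from PIL import Image
# processor = __lowerCAmelCase(size={"shortest_edge": 224})
# batch = processor(images=Image.open("example.jpg"), return_tensors="np")
# print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)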
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : Any=None ):
'''simple docstring'''
A_ : List[Any] = data
A_ : Optional[int] = None
def __repr__( self : Any ):
'''simple docstring'''
A_ : Any = []
A_ : List[str] = self
while temp:
string_rep.append(f'{temp.data}' )
A_ : Dict = temp.next
return "->".join(_a )
def lowerCamelCase ( lowerCamelCase : list):
if not elements_list:
raise Exception("""The Elements List is empty""")
A_ : Tuple = Node(elements_list[0])
for i in range(1 , len(lowerCamelCase)):
A_ : Tuple = Node(elements_list[i])
A_ : Union[str, Any] = current.next
return head
def lowerCamelCase ( lowerCamelCase : Node):
if head_node is not None and isinstance(lowerCamelCase , lowerCamelCase):
print_reverse(head_node.next)
print(head_node.data)
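# Note: print_reverse recurses once per node, so very long lists can exceed
# Python's default recursion limit (see sys.getrecursionlimit(), usually 1000).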
def lowerCamelCase ( ):
from doctest import testmod
testmod()
A_ : List[Any] = make_linked_list([14, 52, 14, 12, 43])
print("""Linked List:""")
print(lowerCamelCase)
print("""Elements in Reverse:""")
print_reverse(lowerCamelCase)
if __name__ == "__main__":
main()
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """Wav2Vec2FeatureExtractor"""
a_ = """AutoTokenizer"""
def __init__( self : str ,_a : str ,_a : Any ):
'''simple docstring'''
super().__init__(_a ,_a )
A_ : List[str] = self.feature_extractor
A_ : Dict = False
@classmethod
def _a ( cls : Any ,_a : List[str] ,**_a : Optional[int] ):
'''simple docstring'''
try:
return super().from_pretrained(_a ,**_a )
except OSError:
warnings.warn(
f'Loading a tokenizer inside {cls.__name__} from a config that does not'
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ ,_a ,)
A_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(_a ,**_a )
A_ : Any = WavaVecaCTCTokenizer.from_pretrained(_a ,**_a )
return cls(feature_extractor=_a ,tokenizer=_a )
def __call__( self : int ,*_a : List[Any] ,**_a : Optional[int] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_a ,**_a )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
A_ : str = kwargs.pop("""raw_speech""" )
else:
A_ : List[Any] = kwargs.pop("""audio""" ,_a )
A_ : Optional[Any] = kwargs.pop("""sampling_rate""" ,_a )
A_ : List[str] = kwargs.pop("""text""" ,_a )
if len(_a ) > 0:
A_ : List[Any] = args[0]
A_ : Any = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A_ : Union[str, Any] = self.feature_extractor(_a ,*_a ,sampling_rate=_a ,**_a )
if text is not None:
A_ : Dict = self.tokenizer(_a ,**_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A_ : Dict = encodings["""input_ids"""]
return inputs
def _a ( self : Optional[int] ,*_a : Union[str, Any] ,**_a : List[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_a ,**_a )
A_ : Optional[int] = kwargs.pop("""input_features""" ,_a )
A_ : Union[str, Any] = kwargs.pop("""labels""" ,_a )
if len(_a ) > 0:
A_ : int = args[0]
A_ : Tuple = args[1:]
if input_features is not None:
A_ : List[str] = self.feature_extractor.pad(_a ,*_a ,**_a )
if labels is not None:
A_ : Tuple = self.tokenizer.pad(_a ,**_a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A_ : str = labels["""input_ids"""]
return input_features
def _a ( self : Optional[int] ,*_a : Tuple ,**_a : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : Union[str, Any] ,*_a : List[str] ,**_a : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@contextmanager
def _a ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
A_ : int = True
A_ : List[str] = self.tokenizer
yield
A_ : Optional[int] = self.feature_extractor
A_ : Optional[Any] = False
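# Usage sketch (commented out; the checkpoint path is a placeholder and
# `waveform` stands for a 1-D float array of raw audio samples):
# processor = __lowerCAmelCase.from_pretrained("path/to/wav2vec2-checkpoint")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")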
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
A_ : int = symbols(lowerCamelCase)
A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
A_ : str = starting_point
while True:
if diff_function(lowerCamelCase) != 0:
A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError("""Could not find root""") from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
A_ : Union[str, Any] = next_guess
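# The loop above implements the modified Newton-Raphson update
#   x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n),
# which reduces to the standard method when multiplicity == 1 and restores
# quadratic convergence at roots of higher multiplicity.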
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = IFInpaintingPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _a ( self : int ):
'''simple docstring'''
return self._get_dummy_components()
def _a ( self : List[str] ,_a : Tuple ,_a : Any=0 ):
'''simple docstring'''
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : Tuple = torch.Generator(device=_a ).manual_seed(_a )
A_ : Dict = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a )
A_ : Optional[int] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a )
A_ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _a ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Optional[Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" )
def _a ( self : List[str] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : Any ):
'''simple docstring'''
self._test_save_load_local()
def _a ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
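# Collate a batch of dataset items: pad the text ids to the longest sequence in
# the batch, build the matching attention mask, and stack the image tensors,
# labels and image start/end token ids.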
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]=False):
A_ : Optional[int] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias'))
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
])
return rename_keys
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any]=False):
for i in range(config.num_hidden_layers):
if base_model:
A_ : str = """"""
else:
A_ : Any = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F'blocks.{i}.attn.qkv.weight')
A_ : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
A_ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A_ : str = in_proj_bias[: config.hidden_size]
A_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Tuple = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( lowerCamelCase : int):
A_ : Union[str, Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : int):
A_ : List[str] = dct.pop(lowerCamelCase)
A_ : Tuple = val
def lowerCamelCase ( ):
A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any]=True):
A_ : Any = ViTConfig()
# patch_size
if model_name[-1] == "8":
A_ : Tuple = 8
# set labels if required
if not base_model:
A_ : str = 1000
A_ : Any = """huggingface/label-files"""
A_ : Optional[int] = """imagenet-1k-id2label.json"""
A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : Any = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : int = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A_ : Any = 384
A_ : Tuple = 1536
A_ : Union[str, Any] = 12
A_ : Union[str, Any] = 6
# load original model from torch hub
A_ : Tuple = torch.hub.load("""facebookresearch/dino:main""" , lowerCamelCase)
original_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Tuple = original_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase)
A_ : Optional[Any] = create_rename_keys(lowerCamelCase , base_model=lowerCamelCase)
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase)
read_in_q_k_v(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# load HuggingFace model
if base_model:
A_ : Tuple = ViTModel(lowerCamelCase , add_pooling_layer=lowerCamelCase).eval()
else:
A_ : Union[str, Any] = ViTForImageClassification(lowerCamelCase).eval()
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by ViTImageProcessor
A_ : Tuple = ViTImageProcessor()
A_ : str = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = encoding["""pixel_values"""]
A_ : Tuple = model(lowerCamelCase)
if base_model:
A_ : str = original_model(lowerCamelCase)
assert torch.allclose(lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1)
else:
A_ : int = original_model(lowerCamelCase)
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase , outputs.logits , atol=1E-3)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__magic_name__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
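# Example invocation (the script filename below is hypothetical):
#   python convert_dino_checkpoint.py --model_name dino_vits8 --pytorch_dump_folder_path ./dino-vits8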
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase)
            # Mark multiples of start as False
for i in range(start * start , num + 1 , lowerCamelCase):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(lowerCamelCase)
return prime
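# Example: prime_sieve(30) returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].
# The sieve runs in O(n log log n) time and O(n) space.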
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = 'T5Config'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """mt5"""
a_ = MTaConfig
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """mt5"""
a_ = MTaConfig
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """mt5"""
a_ = MTaConfig
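# These subclasses reuse the TF T5 implementations unchanged; only the model
# type string and the config class differ, so mT5 checkpoints load through the
# same code path as T5.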
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of worker processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space), so we remove that
    # left whitespace.
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
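        # Bytes required for one TensorRT binding: element count (shape volume)
        # multiplied by the size of the binding's dtype.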
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
        # predictions and labels must be padded so they can be gathered across processes
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase ( lowerCamelCase : str):
'''simple docstring'''
    if (
        (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)  # CJK Unified Ideographs
        or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F)  # CJK Unified Ideographs Extension A
        or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F)  # CJK Unified Ideographs Extension B
        or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F)  # CJK Unified Ideographs Extension C
        or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F)  # CJK Unified Ideographs Extension D
        or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F)  # CJK Unified Ideographs Extension E
        or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)  # CJK Compatibility Ideographs
        or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
'''simple docstring'''
for char in word:
A_ : List[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
'''simple docstring'''
A_ : int = set()
for token in tokens:
A_ : List[str] = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : int = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
'''simple docstring'''
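    # Greedily match the longest LTP-segmented word starting at each position and
    # prefix the non-initial BERT pieces of a matched word with "##".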
if not chinese_word_set:
return bert_tokens
A_ : Optional[int] = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : int = bert_tokens
    A_ , A_ : Optional[Any] = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : int = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Any = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Optional[Any] = """##""" + bert_word[j]
A_ : Dict = start + i
A_ : int = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : str = ltp_tokenizer.seg(lines[i : i + 100])[0]
A_ : Optional[int] = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Dict = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Optional[Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : int = []
for id in input_ids:
A_ : Optional[int] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : Tuple = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
                # save the Chinese tokens' positions
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : List[str]):
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Tuple = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : int = LTP(args.ltp) # faster in GPU device
A_ : Optional[Any] = BertTokenizer.from_pretrained(args.bert)
A_ : Union[str, Any] = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : List[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__magic_name__ = parser.parse_args()
main(args)
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
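    # _LazyModule defers the heavy torch/TF imports declared above until the
    # corresponding attribute is first accessed.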
| 27 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[Any]=None):
# Recurse if needed
if "." in tensor_name:
A_ : Union[str, Any] = tensor_name.split(""".""")
for split in splits[:-1]:
A_ : List[str] = getattr(lowerCamelCase , lowerCamelCase)
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.')
A_ : Any = new_module
A_ : Tuple = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.')
A_ : Union[str, Any] = tensor_name in module._buffers
A_ : str = getattr(lowerCamelCase , lowerCamelCase)
if old_value.device == torch.device("""meta""") and device not in ["meta", torch.device("""meta""")] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
A_ : int = False
A_ : Any = False
if is_buffer or not is_bitsandbytes_available():
A_ : Tuple = False
A_ : str = False
else:
A_ : Any = hasattr(bnb.nn , """Params4bit""") and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit)
A_ : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams)
if is_abit or is_abit:
A_ : Optional[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A_ : int = old_value.to(lowerCamelCase)
elif isinstance(lowerCamelCase , torch.Tensor):
A_ : Tuple = value.to("""cpu""")
if value.dtype == torch.inta:
A_ : int = version.parse(importlib.metadata.version("""bitsandbytes""")) > version.parse(
"""0.37.2""")
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""")
else:
A_ : List[str] = torch.tensor(lowerCamelCase , device="""cpu""")
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowerCamelCase) and fpaa_statistics is None:
A_ : Tuple = new_value.T
A_ : str = old_value.__dict__
if is_abit:
A_ : Dict = bnb.nn.IntaParams(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase).to(lowerCamelCase)
elif is_abit:
A_ : List[str] = bnb.nn.Paramsabit(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase).to(lowerCamelCase)
A_ : Union[str, Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(lowerCamelCase))
else:
if value is None:
A_ : Optional[Any] = old_value.to(lowerCamelCase)
elif isinstance(lowerCamelCase , torch.Tensor):
A_ : List[Any] = value.to(lowerCamelCase)
else:
A_ : Any = torch.tensor(lowerCamelCase , device=lowerCamelCase)
if is_buffer:
A_ : Dict = new_value
else:
A_ : List[Any] = nn.Parameter(lowerCamelCase , requires_grad=old_value.requires_grad)
A_ : Any = new_value
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : int=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=False):
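    # Recursively walk the module tree, replacing every nn.Linear / Conv1D that is
    # not listed in `modules_to_not_convert` with its bitsandbytes 8-bit or 4-bit
    # counterpart, and track whether any replacement happened.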
for name, module in model.named_children():
if current_key_name is None:
A_ : Tuple = []
current_key_name.append(lowerCamelCase)
if (isinstance(lowerCamelCase , nn.Linear) or isinstance(lowerCamelCase , lowerCamelCase)) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(lowerCamelCase) for key in modules_to_not_convert):
with init_empty_weights():
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = module.weight.shape
else:
A_ : Optional[Any] = module.in_features
A_ : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A_ : List[Any] = bnb.nn.LinearabitLt(
lowerCamelCase , lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A_ : Any = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A_ : Optional[int] = bnb.nn.Linearabit(
lowerCamelCase , lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A_ : Tuple = True
# Store the module class in case we need to transpose the weight later
A_ : int = type(lowerCamelCase)
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCamelCase)
if len(list(module.children())) > 0:
A_ : Any = _replace_with_bnb_linear(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_been_replaced=lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : str=None , lowerCamelCase : Union[str, Any]=None):
A_ : str = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
A_ : str = _replace_with_bnb_linear(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""")
return model
def lowerCamelCase ( *lowerCamelCase : List[Any] , **lowerCamelCase : Union[str, Any]):
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , lowerCamelCase , )
return replace_with_bnb_linear(*lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( *lowerCamelCase : Tuple , **lowerCamelCase : Dict):
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , lowerCamelCase , )
return set_module_quantized_tensor_to_device(*lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Tuple):
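    # Collect module names that should stay in full precision: the model's last
    # child module (typically the output head) plus any parameters tied to it,
    # returned with ".weight"/".bias" suffixes stripped.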
    A_ : str = deepcopy(lowerCamelCase)  # this has zero cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
A_ : Optional[Any] = find_tied_parameters(lowerCamelCase)
# For compatibility with Accelerate < 0.18
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : str = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
A_ : Dict = sum(lowerCamelCase , [])
A_ : List[Any] = len(lowerCamelCase) > 0
# Check if it is a base model
A_ : Union[str, Any] = not hasattr(lowerCamelCase , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ : int = list(model.named_children())
A_ : List[Any] = [list_modules[-1][0]]
# add last module together with tied weights
A_ : Any = set(lowerCamelCase) - set(lowerCamelCase)
A_ : Any = list(set(lowerCamelCase)) + list(lowerCamelCase)
# remove ".weight" from the keys
A_ : Dict = [""".weight""", """.bias"""]
A_ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ : Any = name.replace(lowerCamelCase , """""")
filtered_module_names.append(lowerCamelCase)
return filtered_module_names
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        A_ : Dict = OwlViTTextConfig(**text_config )
        A_ : Dict = OwlViTVisionConfig(**vision_config )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
        return cls.from_dict(config_dict ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
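        # absolute tolerance used when validating the exported ONNX model against the PyTorch reference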
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """whisper"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str ,_a : List[str]=51865 ,_a : Optional[Any]=80 ,_a : List[Any]=6 ,_a : int=4 ,_a : Optional[int]=6 ,_a : int=4 ,_a : Optional[Any]=1536 ,_a : Union[str, Any]=1536 ,_a : Union[str, Any]=0.0 ,_a : List[Any]=0.0 ,_a : Tuple=50257 ,_a : Optional[int]=True ,_a : Dict=True ,_a : int="gelu" ,_a : Dict=256 ,_a : Optional[Any]=0.0 ,_a : Optional[Any]=0.0 ,_a : Union[str, Any]=0.0 ,_a : Optional[Any]=0.02 ,_a : Dict=False ,_a : Tuple=1500 ,_a : Dict=448 ,_a : Tuple=50256 ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : Optional[int]=None ,_a : Tuple=[220, 50256] ,_a : Dict=False ,_a : List[str]=256 ,_a : Optional[int]=False ,_a : int=0.05 ,_a : Any=10 ,_a : int=2 ,_a : int=0.0 ,_a : Union[str, Any]=10 ,_a : Optional[int]=0 ,_a : int=7 ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Union[str, Any] = vocab_size
A_ : List[str] = num_mel_bins
A_ : Tuple = d_model
A_ : List[str] = encoder_layers
A_ : List[Any] = encoder_attention_heads
A_ : Dict = decoder_layers
A_ : Any = decoder_attention_heads
A_ : Union[str, Any] = decoder_ffn_dim
A_ : Optional[Any] = encoder_ffn_dim
A_ : Union[str, Any] = dropout
A_ : Tuple = attention_dropout
A_ : Tuple = activation_dropout
A_ : str = activation_function
A_ : Union[str, Any] = init_std
A_ : Optional[Any] = encoder_layerdrop
A_ : List[Any] = decoder_layerdrop
A_ : Tuple = use_cache
A_ : Optional[Any] = encoder_layers
A_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[int] = max_source_positions
A_ : str = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = classifier_proj_size
A_ : Tuple = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Any = apply_spec_augment
A_ : Optional[int] = mask_time_prob
A_ : Optional[int] = mask_time_length
A_ : Optional[int] = mask_time_min_masks
A_ : Optional[Any] = mask_feature_prob
A_ : Optional[Any] = mask_feature_length
A_ : Any = mask_feature_min_masks
A_ : List[str] = median_filter_width
super().__init__(
pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,suppress_tokens=_a ,begin_suppress_tokens=_a ,**_a ,)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A_ : Optional[int] = {0: """batch"""}
else:
A_ : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="""inputs""" )
return common_inputs
def _a ( self : Dict ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 22050 ,_a : float = 5.0 ,_a : int = 220 ,):
'''simple docstring'''
A_ : int = OrderedDict()
A_ : Dict = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=_a ,framework=_a ,sampling_rate=_a ,time_duration=_a ,frequency=_a ,)
A_ : int = encoder_inputs["""input_features"""].shape[2]
A_ : Union[str, Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A_ : int = super().generate_dummy_inputs(
preprocessor.tokenizer ,_a ,_a ,_a ,_a )
A_ : Optional[Any] = encoder_inputs.pop("""input_features""" )
A_ : Dict = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A_ : List[Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _a ( self : Any ):
'''simple docstring'''
return 1e-3
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
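        # number of frequency bins retained from the FFT: fft_window_size // 2 + 1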
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
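    # CLAP-style feature fusion for long audio: sample one chunk each from the front,
    # middle and back of the mel spectrogram and stack them together with a bilinearly
    # downsampled copy of the full spectrogram.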
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
        A_ : Dict = torch.nn.functional.interpolate(
            mel ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=False )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
                A_ : Tuple = max_length // self.hop_length + 1  # the +1 accounts for how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # only use repeat as a new possible value for padding: repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
                A_ : Optional[Any] = np.stack(np.tile(_a ,n_repeat ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
            A_ : Optional[int] = [np.asarray(speech ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
            self._get_input_mel(waveform ,max_length if max_length else self.nb_max_samples ,truncation ,padding )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
            A_ : List[Any] = np.random.randint(0 ,len(input_mel ) )
A_ : List[str] = True
        if isinstance(input_mel[0] ,list ):
            A_ : Tuple = [np.asarray(feature ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
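# Hypothetical usage sketch (not part of the original file; the class above corresponds
# to `ClapFeatureExtractor` in the upstream source):
#
#   extractor = ClapFeatureExtractor()
#   features = extractor(raw_audio, sampling_rate=48000, return_tensors="pt")
#   # with truncation="fusion", features["input_features"] stacks four mel views per clip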
| 27 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fp16:
    __magic_name__ = 'temp_engine/bert-fp16.engine'
if args.int8:
    __magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream)
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream)
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream)
# start time
A_ : List[Any] = time.time()
# Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_outputa), int(d_outputa)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_outputa , d_outputa , stream)
    cuda.memcpy_dtoh_async(h_outputa , d_outputa , stream)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
        A_ : Any = tokenized_examples.sequence_ids(i)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
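        # bytes needed for one binding = element count (volume of the binding shape) * dtype size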
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
        model.to(torch_device )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
        model.to(torch_device )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
        model.to(torch_device )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
        model.to(torch_device )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : str = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=torch_device )  # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        A_ : int = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 27 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
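# Normalize the accepted video formats (a single image, a list of frames, or a batch of
# frame lists) into a batch: a list of videos, each of which is a list of frames.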
def lowerCamelCase ( lowerCamelCase : str):
if isinstance(lowerCamelCase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
return videos
elif isinstance(lowerCamelCase , (list, tuple)) and is_valid_image(videos[0]):
return [videos]
elif is_valid_image(lowerCamelCase):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}')
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : List[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
A_ : Any = get_size_dict(_a ,default_to_square=_a )
A_ : List[str] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : str = get_size_dict(_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Optional[int] = do_center_crop
A_ : Dict = crop_size
A_ : str = resample
A_ : Tuple = do_rescale
A_ : int = rescale_factor
A_ : Dict = offset
A_ : Optional[Any] = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Dict ,):
'''simple docstring'''
A_ : int = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" in size:
A_ : Dict = get_resize_output_image_size(_a ,size["""shortest_edge"""] ,default_to_square=_a )
elif "height" in size and "width" in size:
A_ : List[Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        return resize(_a ,size=output_size ,resample=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Tuple = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : int ,_a : np.ndarray ,_a : Union[int, float] ,_a : bool = True ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[int] = image.astype(np.floataa )
if offset:
A_ : Optional[int] = image - (scale / 2)
        return rescale(image ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Tuple ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
A_ : Optional[int] = to_numpy_array(_a )
if do_resize:
A_ : Tuple = self.resize(image=_a ,size=_a ,resample=_a )
if do_center_crop:
A_ : Optional[int] = self.center_crop(_a ,size=_a )
if do_rescale:
A_ : Any = self.rescale(image=_a ,scale=_a ,offset=_a )
if do_normalize:
A_ : Tuple = self.normalize(image=_a ,mean=_a ,std=_a )
A_ : int = to_channel_dimension_format(_a ,_a )
return image
def _a ( self : int ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : ChannelDimension = ChannelDimension.FIRST ,**_a : List[Any] ,):
'''simple docstring'''
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = resample if resample is not None else self.resample
A_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
A_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : int = offset if offset is not None else self.offset
A_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : str = size if size is not None else self.size
A_ : List[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else self.crop_size
A_ : List[str] = get_size_dict(_a ,param_name="""crop_size""" )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
A_ : List[Any] = make_batched(_a )
A_ : List[Any] = [
[
self._preprocess_image(
                    image=img ,do_resize=do_resize ,size=size ,resample=resample ,do_center_crop=do_center_crop ,crop_size=crop_size ,do_rescale=do_rescale ,rescale_factor=rescale_factor ,offset=offset ,do_normalize=do_normalize ,image_mean=image_mean ,image_std=image_std ,data_format=data_format ,)
for img in video
]
for video in videos
]
A_ : List[Any] = {"""pixel_values""": videos}
return BatchFeature(data=_a ,tensor_type=_a )
| 721 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
return baseaa.aaaencode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
return baseaa.aaaencode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
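# examples for the check above: _is_chinese_char(ord("神")) is True, _is_chinese_char(ord("0")) is False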
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
        A_ : Optional[Any] = ord(char)
        if not _is_chinese_char(cp):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
        A_ : str = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    A_ : Any = list(word_set)
return word_list
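# Re-mark BERT sub-tokens that continue a Chinese whole word with the "##" prefix so
# that whole-word masking can treat the word as a single unit.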
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
    A_ : Any = max([len(w) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
            A_ : List[str] = min(end - start , max_word_len)
            for i in range(l , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
    for i in range(0 , len(lines) , 100):
        A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
        A_ : int = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
A_ : List[Any] = []
    for i in range(0 , len(lines) , 100):
        A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512)
        bert_res.extend(res["""input_ids"""])
    assert len(bert_res) == len(lines)
A_ : Union[str, Any] = []
    for input_ids, chinese_word in zip(bert_res , ltp_res):
        A_ : List[Any] = []
        for id in input_ids:
            A_ : List[Any] = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(input_token)
        A_ : int = add_sub_symbol(input_tokens , chinese_word)
A_ : str = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
    A_ : Union[str, Any] = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
    A_ : str = prepare_ref(data , ltp_tokenizer , bert_tokenizer)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
        A_ : Optional[Any] = [json.dumps(ref) + """\n""" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save the result',
)
args = parser.parse_args()
main(args)
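# Example invocation (a sketch; all paths below are placeholders for whatever
# resources you actually have):
#   python prepare_chinese_ref.py --file_name ./data/train.txt --ltp ./resources/ltp \
#       --bert bert-base-chinese --save_path ./data/ref.txt
# Each output line is a JSON list of the token positions that sit in the
# interior of a whole Chinese word, ready for whole-word-masking data collators.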
| 27 | 0 |
'''simple docstring'''
def check_bouncy(num: int) -> bool:
    """Return True if `num` is bouncy, i.e. its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(num, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(num)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    first reaches `percent` (Project Euler problem 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""")
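# Quick sanity checks (a sketch; solution(50) == 538 is the known Project
# Euler 112 value for the 50% threshold):
assert check_bouncy(155349) is True    # digits neither all non-decreasing nor all non-increasing
assert check_bouncy(134468) is False   # non-decreasing digits
assert solution(50) == 538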
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
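# Minimal usage sketch (assuming the real `ViltProcessor` class and the public
# "dandelin/vilt-b32-finetuned-vqa" checkpoint):
# from transformers import ViltProcessor
# from PIL import Image
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# inputs = processor(images=Image.open("cat.png"), text="How many cats?", return_tensors="pt")
# `inputs` then holds input_ids/attention_mask from the tokenizer plus
# pixel_values/pixel_mask from the image processor, merged by __call__ above.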
| 27 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = 1
A_ : int = 3
A_ : List[Any] = (32, 32)
A_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_a )
return image
@property
def _a ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=_a ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def _a ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
return CLIPTextModel(_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : List[str] = self.dummy_cond_unet_upscale
A_ : int = DDPMScheduler()
A_ : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : List[str] = self.dummy_vae
A_ : List[Any] = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Dict = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Tuple = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = """A painting of a squirrel eating a burger"""
A_ : Any = torch.Generator(device=_a ).manual_seed(0 )
A_ : Optional[Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : List[str] = output.images
A_ : str = torch.Generator(device=_a ).manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=_a ,)[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : Dict = image_from_tuple[0, -3:, -3:, -1]
A_ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A_ : List[str] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : List[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : int = self.dummy_vae
A_ : str = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : List[str] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Dict = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = """A painting of a squirrel eating a burger"""
A_ : Union[str, Any] = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Tuple = output.images
assert image.shape[0] == 2
A_ : Tuple = torch.Generator(device=_a ).manual_seed(0 )
A_ : List[str] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : Tuple = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : Optional[Any] = self.dummy_vae
A_ : List[str] = self.dummy_text_encoder
A_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Tuple = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A_ : Optional[Any] = unet.half()
A_ : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
A_ : List[Any] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Optional[int] = """A painting of a squirrel eating a burger"""
A_ : List[Any] = torch.manual_seed(0 )
A_ : str = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_inference_steps=2 ,output_type="""np""" ,).images
A_ : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
A_ : List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : int = torch.manual_seed(0 )
A_ : int = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
A_ : Tuple = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : int = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Any = """a cat sitting on a park bench"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : List[str] = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : Any = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : Any = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=_a ,image=_a ,generator=_a ,num_inference_steps=5 ,output_type="""np""" ,)
A_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
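# These dummies keep `import diffusers` working when optional backends are
# missing; instantiating one raises an informative ImportError. A sketch,
# assuming this placeholder corresponds to DPMSolverSDEScheduler (the
# torchsde-gated scheduler) in the real library:
# from diffusers.utils.dummy_torch_and_torchsde_objects import DPMSolverSDEScheduler
# DPMSolverSDEScheduler()  # -> ImportError asking you to install torch and torchsde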
| 27 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__magic_name__ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
a_ = field(metadata={"""help""": """Should contain the data files for the task."""} )
a_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""")
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase)
# Set seed
set_seed(training_args.seed)
try:
A_ : List[str] = processors[data_args.task_name]()
A_ : Optional[int] = processor.get_labels()
A_ : List[str] = len(lowerCamelCase)
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A_ : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A_ : Any = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A_ : List[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
A_ : str = DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
A_ : Optional[Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
A_ : List[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
A_ : List[str] = trainer.evaluate()
A_ : Union[str, Any] = os.path.join(training_args.output_dir , """eval_results.txt""")
if trainer.is_world_master():
with open(lowerCamelCase , """w""") as writer:
logger.info("""***** Eval results *****""")
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase , lowerCamelCase)
writer.write("""%s = %s\n""" % (key, value))
results.update(lowerCamelCase)
return results
def lowerCamelCase ( lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
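# Example invocation (a sketch; valid task names come from
# utils_multiple_choice.processors, e.g. "swag"):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag --output_dir ./out --do_train --do_eval \
#       --max_seq_length 80 --per_device_train_batch_size 16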
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
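# Worked example for trim_batch (a sketch): columns that are padding in every
# row are dropped, so a batch padded to length 4 shrinks to its longest row.
# import torch
# ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
# trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])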
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def normalize_answer(s: str) -> str:
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction: str, ground_truth: str) -> float:
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction: str, ground_truth: str) -> bool:
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: list, reference_lns: list) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
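# Quick sanity checks for the metric helpers above (a sketch):
# f1_score("a cat sat", "the cat sat") == 1.0   # articles are stripped by normalize_answer
# exact_match_score("The Cat!", "the cat")      # True: case and punctuation are normalized away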
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
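# Import-time behavior (a sketch): _LazyModule defers the heavy submodule
# imports until an attribute is first accessed, so e.g.
# `from transformers.models.nllb import NllbTokenizerFast` only pulls in the
# tokenizers backend at that point, not when the package itself is imported.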
| 27 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """imagegpt"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str ,_a : int=512 + 1 ,_a : List[str]=32 * 32 ,_a : int=512 ,_a : Optional[int]=24 ,_a : List[str]=8 ,_a : Dict=None ,_a : Optional[Any]="quick_gelu" ,_a : str=0.1 ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : str=1e-5 ,_a : Union[str, Any]=0.02 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[Any]=False ,_a : List[Any]=False ,_a : str=False ,**_a : Tuple ,):
'''simple docstring'''
A_ : Optional[int] = vocab_size
A_ : List[Any] = n_positions
A_ : Any = n_embd
A_ : List[Any] = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : Tuple = activation_function
A_ : Union[str, Any] = resid_pdrop
A_ : Union[str, Any] = embd_pdrop
A_ : Tuple = attn_pdrop
A_ : List[Any] = layer_norm_epsilon
A_ : Union[str, Any] = initializer_range
A_ : Any = scale_attn_weights
A_ : int = use_cache
A_ : Tuple = scale_attn_by_inverse_layer_idx
A_ : Optional[Any] = reorder_and_upcast_attn
A_ : Dict = tie_word_embeddings
super().__init__(tie_word_embeddings=_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _a ( self : int ,_a : "FeatureExtractionMixin" ,_a : int = 1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 32 ,_a : int = 32 ,):
'''simple docstring'''
A_ : Any = self._generate_dummy_images(_a ,_a ,_a ,_a )
A_ : List[Any] = dict(preprocessor(images=_a ,return_tensors=_a ) )
return inputs
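# The attribute map above aliases BERT-style config names onto the GPT-style
# ones; a sketch of the effect, assuming the real `ImageGPTConfig` class:
# from transformers import ImageGPTConfig
# cfg = ImageGPTConfig()
# cfg.hidden_size == cfg.n_embd          # True, resolved via the attribute map
# cfg.num_hidden_layers == cfg.n_layer   # True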
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,*_a : List[Any] ,_a : Optional[Any]=None ,_a : int=None ,**_a : Tuple ):
'''simple docstring'''
super().__init__(*_a ,**_a )
A_ : Tuple = eval_examples
A_ : Optional[int] = post_process_function
def _a ( self : Tuple ,_a : Tuple=None ,_a : Union[str, Any]=None ,_a : List[Any]=None ,_a : str = "eval" ):
'''simple docstring'''
A_ : str = self.eval_dataset if eval_dataset is None else eval_dataset
A_ : Optional[Any] = self.get_eval_dataloader(_a )
A_ : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A_ : List[str] = self.compute_metrics
A_ : Tuple = None
A_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
A_ : List[str] = time.time()
try:
A_ : Dict = eval_loop(
_a ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
A_ : Optional[Any] = compute_metrics
A_ : Tuple = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
A_ : int = self.post_process_function(_a ,_a ,output.predictions )
A_ : str = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
A_ : List[Any] = metrics.pop(_a )
metrics.update(output.metrics )
else:
A_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A_ : List[Any] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_a )
return metrics
def _a ( self : Optional[Any] ,_a : Dict ,_a : List[Any] ,_a : List[Any]=None ,_a : str = "test" ):
'''simple docstring'''
A_ : str = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
A_ : Any = self.compute_metrics
A_ : List[Any] = None
A_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
A_ : int = time.time()
try:
A_ : str = eval_loop(
_a ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
A_ : Tuple = compute_metrics
A_ : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A_ : Any = self.post_process_function(_a ,_a ,output.predictions ,"""predict""" )
A_ : Union[str, Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
A_ : str = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_a )
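# A sketch of the metric-prefixing convention used by both methods above,
# as a hypothetical standalone helper for illustration:
def prefix_metrics(metrics: dict, prefix: str = "eval") -> dict:
    return {k if k.startswith(f"{prefix}_") else f"{prefix}_{k}": v for k, v in metrics.items()}

# prefix_metrics({"exact_match": 81.2, "f1": 88.6})
# -> {"eval_exact_match": 81.2, "eval_f1": 88.6}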
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
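# For reference, the dynamic-axes mapping above resolves to the following for
# the default (non-multiple-choice) task when type_vocab_size > 0 (a sketch):
# {
#     "input_ids":      {0: "batch", 1: "sequence"},
#     "attention_mask": {0: "batch", 1: "sequence"},
#     "token_type_ids": {0: "batch", 1: "sequence"},
# }
# When type_vocab_size == 0, "token_type_ids" is absent, which is why
# generate_dummy_inputs deletes it from the dummy batch.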
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # saved only to inspect the result page's CSS classes
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 27 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = self.dummy_uncond_unet
A_ : List[str] = KarrasVeScheduler()
A_ : Union[str, Any] = KarrasVePipeline(unet=_a ,scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(num_inference_steps=2 ,generator=_a ,output_type="""numpy""" ).images
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(num_inference_steps=2 ,generator=_a ,output_type="""numpy""" ,return_dict=_a )[0]
A_ : Dict = image[0, -3:, -3:, -1]
A_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = """google/ncsnpp-celebahq-256"""
A_ : Dict = UNetaDModel.from_pretrained(_a )
A_ : List[str] = KarrasVeScheduler()
A_ : Any = KarrasVePipeline(unet=_a ,scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : str = torch.manual_seed(0 )
A_ : Optional[int] = pipe(num_inference_steps=20 ,generator=_a ,output_type="""numpy""" ).images
A_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ : Dict = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
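# Hedged usage sketch for the configuration above (round-tripping through
# save_pretrained/from_pretrained; the directory name is illustrative):
#
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#   config.save_pretrained("./nezha-config")          # writes config.json
#   reloaded = NezhaConfig.from_pretrained("./nezha-config")
#   assert reloaded.hidden_size == 768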
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
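# Quick sanity checks for the sieve (the expected lists follow directly from
# the definition of primality):
#
#   assert prime_sieve(10) == [2, 3, 5, 7]
#   assert prime_sieve(2) == [2]
#
# The sieve runs in O(n log log n) time and O(n) space.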
| 709 |
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
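# With the adjacency list G above, every vertex is reachable from 'A', so:
#
#   assert depth_first_search(G, 'A') == {'A', 'B', 'C', 'D', 'E', 'F', 'G'}
#
# Only set membership is guaranteed; a set has no meaningful iteration order.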
| 27 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
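# Hedged usage sketch for the feature extractor above (the random 48 kHz
# waveform is illustrative; any mono float array works):
#
#   import numpy as np
#
#   extractor = ClapFeatureExtractor()
#   waveform = np.random.randn(48_000 * 12)            # ~12 s of audio
#   batch = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#   print(batch["input_features"].shape, batch["is_longer"])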
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])

            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ])
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
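# Hedged CLI example (the flags come from the argparse definition above; the
# script filename and the paths are assumptions):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub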
| 27 | 0 |
'''simple docstring'''
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
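# Worked check: with the sample data above the answer is 10. Person 2 must take
# task 3 or 4; in either case persons 0 and 1 have 5 distinct ways to pick their
# remaining tasks, giving 2 * 5 = 10. The memoised recursion visits O(2^M * N)
# states.
#
#   tasks = [[1, 3, 4], [1, 2, 5], [3, 4]]
#   assert AssignmentUsingBitmask(tasks, 5).count_no_of_ways(tasks) == 10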
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
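# Hedged usage sketch for the image processor above (the synthetic image is
# illustrative; any PIL image, numpy array or torch tensor works):
#
#   import numpy as np
#   from PIL import Image
#
#   processor = CLIPImageProcessor()
#   image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)   # (1, 3, 224, 224) with the defaults above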
| 27 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 712 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 27 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1_024,
    'moussaKam/barthez': 1_024,
    'moussaKam/barthez-orangesum-title': 1_024,
}

SPIECE_UNDERLINE = '▁'


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
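# Hedged usage sketch (the checkpoint id is taken from the pretrained maps
# above; AutoTokenizer is assumed to resolve to this tokenizer class):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Bonjour le monde")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))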
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ])
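# Hedged usage sketch for JsonlDataset + collate_fn (the path and tokenizer
# choice are placeholders; each JSONL row needs "text", "label" and "img"
# fields):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)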
| 27 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
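# For the sample graph above the articulation points are 2, 3 and 5: removing 2
# separates {0, 1} from the rest, removing 3 isolates 4, and removing 5 cuts
# off the cycle 5-6-7-8. The helper runs one DFS per component, i.e. O(V + E).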
| 715 |
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 0 |
'''simple docstring'''
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
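# Note: for this extractive-QA engine the two host buffers are assumed to hold
# the start- and end-logit tensors respectively, so callers unpack `outputs`
# as (start_logits, end_logits); the exact binding order depends on how the
# ONNX model above was exported.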
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
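# Illustration of the overflow/stride behaviour relied on above (any HF fast tokenizer
# would do; the call mirrors the arguments used in prepare_validation_features):
#
#   enc = tokenizer(
#       ["What is TensorRT?"], ["TensorRT is an SDK. " * 200],
#       truncation="only_second", max_length=384, stride=128,
#       return_overflowing_tokens=True, return_offsets_mapping=True,
#   )
#   enc["overflow_to_sample_mapping"]   # -> [0, 0, 0, ...]: every feature maps back to example 0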
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
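# Lazy-loading sketch (illustrative; run it standalone, not from this __init__):
#
#   from transformers import WavLMConfig   # cheap -- only the configuration module loads
#   config = WavLMConfig(hidden_size=768)
#   from transformers import WavLMModel    # only now is the torch-backed modeling code imported
#   model = WavLMModel(config)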
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
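# Usage sketch for the backbone exported above (illustrative; run standalone with
# torch installed; the shapes assume the default ConvNextConfig):
#
#   import torch
#   from transformers import ConvNextBackbone, ConvNextConfig
#
#   backbone = ConvNextBackbone(ConvNextConfig(out_features=["stage1", "stage4"]))
#   feats = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   [tuple(f.shape) for f in feats]   # e.g. [(1, 96, 56, 56), (1, 768, 7, 7)]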
| 27 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
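if __name__ == "__main__":
    # Illustrative concrete use of the mixin (assumes transformers is installed;
    # Wav2Vec2FeatureExtractor is just an example -- any feature extractor works).
    import unittest

    from transformers import Wav2Vec2FeatureExtractor

    class Wav2Vec2SaveLoadTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
        feature_extraction_class = Wav2Vec2FeatureExtractor
        feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}

    unittest.main()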
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"
    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        # Instantiate an OwlViTConfig from separate text and vision config dicts.
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
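# Usage sketch for the composite config (illustrative; values are arbitrary):
#
#   text_config = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
#   vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
#   config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#   config.text_config.hidden_size, config.vision_config.patch_size   # -> (512, 32)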
| 27 | 0 |
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh via the logistic sigmoid identity: tanh(x) = 2 * sigmoid(2x) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
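    # Quick numerical check (the name tangent_hyperbolic above is an editorial
    # assumption for the obfuscated original): tanh(x) = 2*sigmoid(2x) - 1.
    v = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    print(np.allclose(tangent_hyperbolic(v), np.tanh(v)))  # True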
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk", )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB", )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
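# Smoke-test sketch (illustrative; run standalone): 12 s of noise at 48 kHz exceeds the
# 10 s window, so the "fusion" truncation path above is exercised.
#
#   import numpy as np
#   fe = ClapFeatureExtractor()
#   feats = fe(np.random.randn(48_000 * 12), sampling_rate=48_000, return_tensors="np")
#   feats["input_features"].shape   # e.g. (1, 4, 1001, 64): 4 stacked mel views
#   feats["is_longer"]              # [[True]]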
| 27 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__magic_name__ = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    # Output a list of tuples(story, 1st continuation, 2nd continuation, label)
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    # Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
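# Shape illustration for pre_process_datasets (toy ids, not executed here):
#
#   toy = [[([5, 6, 7], [8, 9], [10, 11], 1)]]   # one dataset: (story, cont1, cont2, label)
#   (tensors,) = pre_process_datasets(toy, 10, 3, start_token=1, delimiter_token=2, clf_token=3)
#   input_ids, mc_token_ids, lm_labels, mc_labels = tensors
#   input_ids.shape         # torch.Size([1, 2, 10])
#   mc_token_ids.tolist()   # [[7, 7]] -- index of the classification token in each alternative
#   mc_labels.tolist()      # [1]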
def main():
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=lowerCamelCase , default="""openai-gpt""" , help="""pretrained model name""")
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""")
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""")
parser.add_argument(
"""--output_dir""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--eval_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--seed""" , type=lowerCamelCase , default=42)
parser.add_argument("""--num_train_epochs""" , type=lowerCamelCase , default=3)
parser.add_argument("""--train_batch_size""" , type=lowerCamelCase , default=8)
parser.add_argument("""--eval_batch_size""" , type=lowerCamelCase , default=16)
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=lowerCamelCase , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , type=lowerCamelCase , default=1)
parser.add_argument(
"""--max_steps""" , default=-1 , type=lowerCamelCase , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=lowerCamelCase , default=6.25E-5)
parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCamelCase , help="""Linear warmup over warmup_steps.""")
parser.add_argument("""--lr_schedule""" , type=lowerCamelCase , default="""warmup_linear""")
parser.add_argument("""--weight_decay""" , type=lowerCamelCase , default=0.01)
parser.add_argument("""--lm_coef""" , type=lowerCamelCase , default=0.9)
parser.add_argument("""--n_valid""" , type=lowerCamelCase , default=374)
parser.add_argument("""--server_ip""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
parser.add_argument("""--server_port""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        # Tokenize and encode a nested object
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
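# Example invocation (file and CSV paths are placeholders for the ROCStories cloze data):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset /path/to/cloze_val.csv \
#       --eval_dataset /path/to/cloze_test.csv \
#       --output_dir ./rocstories_out \
#       --train_batch_size 16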
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 27 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenizer(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
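if __name__ == "__main__":
    # Minimal in-graph tokenization sketch (requires tensorflow + keras-nlp):
    batch = tf.constant(["Hello there!"])
    tok = TFGPT2Tokenizer.from_pretrained("gpt2")
    print(tok(batch)["input_ids"])  # token ids computed entirely inside the TF graph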
| 721 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the UTF-8 bytes of the input with Ascii85
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # decode Ascii85 back to bytes, then to a UTF-8 string
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
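    # Round-trip sanity check for the helpers above (names reconstructed from the
    # obfuscated base64/a85 calls):
    encoded = base85_encode("hello world")
    print(base85_decode(encoded) == "hello world")  # True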
| 27 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
return False
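# Hedged usage note (added for illustration; the surrounding function names are
# obfuscated, so no direct call is shown): the range check accepts CJK ideographs
# but rejects Hiragana and Hangul, which live in other Unicode blocks, e.g.
#   ord("中") = 0x4E2D  -> inside 0x4E00..0x9FFF, so True
#   ord("あ") = 0x3042  -> Hiragana block, so False
#   ord("한") = 0xD55C  -> Hangul Syllables block, so False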
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
    start, end = 0, len(lowerCamelCase)  # annotations are not allowed on tuple targets
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
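# Hedged sketch (added for illustration, not part of the original script): a minimal
# standalone re-implementation of the "##" rewriting above. The helper name is
# hypothetical and the is_chinese guard is omitted for brevity.
def _demo_add_sub_symbol(tokens, words):
    out, start = list(tokens), 0
    max_len = max((len(w) for w in words), default=0)
    while start < len(out):
        matched = False
        for i in range(min(max_len, len(out) - start), 1, -1):
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]  # mark non-initial characters of the word
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out
# _demo_add_sub_symbol(["黑", "猫", "很", "可", "爱"], {"黑猫", "可爱"})
# -> ["黑", "##猫", "很", "可", "##爱"]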
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
        # We only save positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
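# Hedged illustration (not in the original): for a line whose BERT tokens are
# ["[CLS]", "黑", "猫", "很", "可", "爱", "[SEP]"] and whose LTP segmentation yields
# the words {"黑猫", "可爱"}, the reference ids for that line are [2, 5] -- the
# positions of "##猫" and "##爱", i.e. the subword continuations that whole-word
# masking must mask together with the first character of their word.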
def lowerCamelCase ( lowerCamelCase : Tuple):
    # For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = TextToVideoSDPipeline
a_ = TEXT_TO_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a_ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _a ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
A_ : Tuple = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_a ,set_alpha_to_one=_a ,)
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
A_ : List[Any] = CLIPTextModel(_a )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _a ( self : str ,_a : List[Any] ,_a : Union[str, Any]=0 ):
'''simple docstring'''
if str(_a ).startswith("""mps""" ):
A_ : Dict = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : str = self.get_dummy_components()
A_ : Optional[Any] = TextToVideoSDPipeline(**_a )
A_ : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = self.get_dummy_inputs(_a )
A_ : int = """np"""
A_ : List[str] = sd_pipe(**_a ).frames
A_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A_ : List[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Any ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
pass
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A_ : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ : List[Any] = pipe.to("""cuda""" )
A_ : Optional[Any] = """Spiderman is surfing"""
A_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : List[str] = pipe(_a ,generator=_a ,num_inference_steps=25 ,output_type="""pt""" ).frames
A_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def _a ( self : int ):
'''simple docstring'''
A_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A_ : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ : Any = pipe.to("""cuda""" )
A_ : Any = """Spiderman is surfing"""
A_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : List[str] = pipe(_a ,generator=_a ,num_inference_steps=2 ,output_type="""pt""" ).frames
A_ : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
| 27 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16 , lowerCamelCase : str = "bert-base-cased"):
A_ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase)
A_ : Tuple = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Any):
# max_length=None => use the model max length (it's actually the default)
A_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A_ : str = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : str):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(lowerCamelCase , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
A_ : List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase)
A_ : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase)
return train_dataloader, eval_dataloader
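# Hedged sketch (illustrative only, not executed here): the two padding strategies
# used by the collate function above, on a toy batch.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-cased")
#   batch = [tok("short"), tok("a somewhat longer sentence")]
#   fixed = tok.pad(batch, padding="max_length", max_length=128, return_tensors="pt")
#   dynamic = tok.pad(batch, padding="longest", return_tensors="pt")
#
# fixed["input_ids"].shape is always (2, 128), which keeps TPU graph shapes static,
# while dynamic["input_ids"] is only as wide as the longest example in the batch.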
def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Tuple):
model.eval()
A_ : Union[str, Any] = 0
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Optional[int] = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
A_ : Tuple = accelerator.gather(
(predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase) - 1:
A_ : int = predictions[: len(eval_dataloader.dataset) - samples_seen]
A_ : List[str] = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : str = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
# Initialize accelerator
A_ : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[str] = config["""lr"""]
A_ : Optional[int] = int(config["""num_epochs"""])
A_ : Optional[Any] = int(config["""seed"""])
A_ : str = int(config["""batch_size"""])
A_ : Any = args.model_name_or_path
set_seed(lowerCamelCase)
A_ : Optional[Any] = get_dataloaders(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase)
# Instantiate optimizer
A_ : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCamelCase)
if accelerator.state.deepspeed_plugin is not None:
A_ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A_ : Dict = 1
A_ : Optional[int] = (len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A_ : Any = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=0 , num_training_steps=lowerCamelCase , )
else:
A_ : Optional[Any] = DummyScheduler(lowerCamelCase , total_num_steps=lowerCamelCase , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ : Optional[Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# We need to keep track of how many total steps we have iterated over
A_ : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
A_ : Dict = 0
A_ : Optional[Any] = evaluate.load("""glue""" , """mrpc""")
A_ : str = num_epochs
if args.partial_train_epoch is not None:
A_ : Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint)
A_ : List[str] = args.resume_from_checkpoint.split("""epoch_""")[1]
A_ : Optional[Any] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A_ : Optional[Any] = int(lowerCamelCase) + 1
A_ : Dict = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
accelerator.print("""resumed checkpoint performance:""" , lowerCamelCase)
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0])
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""])
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json') , """r""") as f:
A_ : Union[str, Any] = json.load(lowerCamelCase)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A_ : Tuple = {}
for epoch in range(lowerCamelCase , lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
A_ : Dict = model(**lowerCamelCase)
A_ : str = outputs.loss
A_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A_ : List[str] = F'epoch_{epoch}'
A_ : Optional[Any] = os.path.join(args.output_dir , lowerCamelCase)
accelerator.save_state(lowerCamelCase)
A_ : List[Any] = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : str = accuracy
A_ : List[Any] = lr_scheduler.get_lr()[0]
A_ : int = optimizer.param_groups[0]["""lr"""]
A_ : Any = epoch
A_ : Optional[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json') , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( ):
    A_ : List[Any] = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""")
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowerCamelCase , default=lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowerCamelCase , default=lowerCamelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase , default=2 , help="""Number of train epochs.""" , )
A_ : Tuple = parser.parse_args()
A_ : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
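# Hedged note (illustrative): instantiating this placeholder, or calling its
# classmethods, makes requires_backends raise an ImportError explaining that
# `torch` and `torchsde` are missing, so the real scheduler class is only
# required lazily, at the point of use.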
| 27 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
a_ = None
def lowerCamelCase ( lowerCamelCase : "pyspark.sql.DataFrame" , lowerCamelCase : List[int] , ):
import pyspark
def generate_fn():
A_ : Tuple = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
for partition_id in partition_order:
A_ : List[str] = df_with_partition_id.select("""*""").where(F'part_id = {partition_id}').drop("""part_id""")
A_ : List[Any] = partition_df.collect()
A_ : List[Any] = 0
for row in rows:
yield F'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
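# Hedged note (illustrative): shuffling `partition_order` (see the shuffle method of
# SparkExamplesIterable below) reorders whole Spark partitions rather than individual
# rows, so examples stay grouped by partition. The yielded keys such as "3_0", "3_1",
# ... encode (partition_id, row_id) pairs.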
class __lowerCAmelCase ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : "pyspark.sql.DataFrame" ,_a : Union[str, Any]=None ,):
'''simple docstring'''
A_ : Optional[int] = df
A_ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
A_ : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self : Optional[Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def _a ( self : Optional[int] ,_a : np.random.Generator ):
'''simple docstring'''
A_ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_a )
return SparkExamplesIterable(self.df ,partition_order=_a )
def _a ( self : Optional[Any] ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.split_shard_indices_by_worker(_a ,_a )
return SparkExamplesIterable(self.df ,partition_order=_a )
@property
def _a ( self : Tuple ):
'''simple docstring'''
return len(self.partition_order )
class __lowerCAmelCase ( datasets.DatasetBuilder ):
'''simple docstring'''
a_ = SparkConfig
def __init__( self : Any ,_a : "pyspark.sql.DataFrame" ,_a : str = None ,_a : str = None ,**_a : Optional[int] ,):
'''simple docstring'''
import pyspark
A_ : int = pyspark.sql.SparkSession.builder.getOrCreate()
A_ : Optional[int] = df
A_ : List[Any] = working_dir
super().__init__(
cache_dir=_a ,config_name=str(self.df.semanticHash() ) ,**_a ,)
def _a ( self : int ):
'''simple docstring'''
def create_cache_and_write_probe(_a : Any ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=_a )
            A_ : Optional[Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_a ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
A_ : Any = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_a ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : List[Any] ,_a : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(_a : int ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
A_ : int = self.df.count()
A_ : str = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
A_ : Optional[Any] = (
self.df.limit(_a )
.repartition(1 )
.mapInArrow(_a ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
A_ : Optional[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
A_ : List[str] = min(_a ,int(approx_total_size / max_shard_size ) )
A_ : Optional[Any] = self.df.repartition(_a )
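    # Hedged worked example (added for clarity): with roughly 2 KB per Arrow row and
    # 1_000_000 rows, approx_total_size is about 2e9 bytes; for a max_shard_size of
    # 5e8 bytes the DataFrame is repartitioned into min(1_000_000, int(2e9 / 5e8)) = 4
    # partitions, i.e. roughly one output shard per max_shard_size worth of data.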
def _a ( self : int ,_a : str ,_a : str ,_a : int ,):
'''simple docstring'''
import pyspark
A_ : List[Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
A_ : Optional[Any] = os.path.join(self._working_dir ,os.path.basename(_a ) ) if self._working_dir else fpath
A_ : int = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
A_ : Dict = self.config.features
A_ : Optional[Any] = self._writer_batch_size
A_ : Any = self._fs.storage_options
def write_arrow(_a : Union[str, Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
A_ : str = pyspark.TaskContext().taskAttemptId()
A_ : Tuple = next(_a ,_a )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
A_ : Optional[int] = 0
A_ : Any = writer_class(
features=_a ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,)
A_ : Union[str, Any] = pa.Table.from_batches([first_batch] )
writer.write_table(_a )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
A_ : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
A_ : Dict = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,)
A_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(_a )
if writer._num_bytes > 0:
A_ : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_a ) ):
A_ : List[str] = os.path.join(os.path.dirname(_a ) ,os.path.basename(_a ) )
shutil.move(_a ,_a )
A_ : Dict = (
self.df.mapInArrow(_a ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
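    # Hedged sketch (illustrating the pattern used above, not library code): closures
    # shipped to Spark executors must not reference `self`, or pickling would drag in
    # the whole builder (and its unpicklable SparkContext). Copying the needed
    # attributes into plain locals first keeps the closure picklable:
    #
    #   features = self.config.features      # plain, picklable value
    #   def fn(batch_iter):
    #       ...                              # uses `features`, never `self`
    #   df.mapInArrow(fn, schema)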
def _a ( self : Union[str, Any] ,_a : "datasets.SplitGenerator" ,_a : str = "arrow" ,_a : Optional[Union[str, int]] = None ,_a : Optional[int] = None ,**_a : int ,):
'''simple docstring'''
self._validate_cache_dir()
A_ : Any = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_a )
A_ : List[str] = not is_remote_filesystem(self._fs )
A_ : List[str] = os.path.join if is_local else posixpath.join
A_ : List[str] = """-TTTTT-SSSSS-of-NNNNN"""
A_ : List[str] = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
A_ : Tuple = path_join(self._output_dir ,_a )
A_ : Dict = 0
A_ : Optional[Any] = 0
A_ : Optional[Any] = 0
A_ : Any = []
A_ : str = []
for task_id, content in self._prepare_split_single(_a ,_a ,_a ):
            num_examples, num_bytes, num_shards, shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_a )
A_ : Any = total_num_examples
A_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
A_ : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A_ : Optional[int] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_a : int ,_a : int ,_a : int ,):
rename(
_a ,fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace("""TTTTT-SSSSS""" ,f'{global_shard_id:05d}' ).replace("""NNNNN""" ,f'{total_shards:05d}' ) ,)
A_ : int = []
A_ : Union[str, Any] = 0
for i in range(len(_a ) ):
A_ : Any = task_id_and_num_shards[i]
for shard_id in range(_a ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_a ,len(_a ) ).map(lambda _a : _rename_shard(*_a ) ).collect()
else:
# don't use any pattern
A_ : str = 0
A_ : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace(_a ,"""""" ) ,)
def _a ( self : Optional[int] ,_a : "datasets.SplitGenerator" ,):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
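# Hedged usage sketch (illustrative; argument names follow the apparent intent of the
# signature above): encode_line tokenizes a single line, so the returned tensors carry
# a leading batch dimension of 1, e.g.
#   enc = encode_line(tokenizer, "hello world", max_length=8, padding_side="right")
#   enc["input_ids"].shape -> torch.Size([1, 8])   # padded when pad_to_max_length=True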
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
        source_ids, source_mask = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
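# Hedged worked example (added for illustration) of the token-overlap F1 above:
#   prediction = "the black cat sat", gold = "a cat sat down"
#   after normalize_answer: ["black", "cat", "sat"] vs ["cat", "sat", "down"]
#   num_same = 2, precision = 2/3, recall = 2/3,
#   F1 = 2 * (2/3) * (2/3) / (2/3 + 2/3) = 2/3 ≈ 0.667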
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
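# Hedged note (illustrative): the `equivalent_param` mapping bridges naming differences
# such as BART-style `config.dropout` vs T5-style `config.dropout_rate`; passing
# "dropout" in extra_params against a T5 config sets `config.dropout_rate` instead,
# then removes the attribute from `hparams` once it has been applied.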
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ,_a : Tuple ,_a : Optional[Any]=13 ,_a : Optional[int]=7 ,_a : Optional[Any]=True ,_a : Optional[Any]=True ,_a : List[str]=True ,_a : str=True ,_a : Dict=99 ,_a : Optional[int]=32 ,_a : Optional[int]=5 ,_a : int=4 ,_a : Tuple=37 ,_a : int="gelu" ,_a : Tuple=0.1 ,_a : Union[str, Any]=0.1 ,_a : Optional[Any]=512 ,_a : Dict=16 ,_a : List[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Union[str, Any]=3 ,_a : List[str]=4 ,_a : List[Any]=None ,):
'''simple docstring'''
A_ : Optional[int] = parent
A_ : int = batch_size
A_ : List[Any] = seq_length
A_ : Tuple = is_training
A_ : str = use_input_mask
A_ : int = use_token_type_ids
A_ : Dict = use_labels
A_ : List[str] = vocab_size
A_ : List[str] = hidden_size
A_ : Dict = num_hidden_layers
A_ : str = num_attention_heads
A_ : int = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : List[Any] = initializer_range
A_ : int = num_labels
A_ : int = num_choices
A_ : int = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Union[str, Any] = None
A_ : Dict = None
A_ : Tuple = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[str] ):
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : str ,_a : List[str] ,_a : List[str] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : List[str] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = NystromformerModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a ,token_type_ids=_a )
A_ : Any = model(_a ,token_type_ids=_a )
A_ : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[Any] ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
A_ : int = NystromformerForMaskedLM(config=_a )
model.to(_a )
model.eval()
A_ : Tuple = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Optional[Any] ,_a : Dict ,_a : Any ,_a : Optional[int] ,_a : str ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
A_ : List[str] = NystromformerForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : str ,_a : Optional[int] ,_a : List[str] ,_a : List[str] ,_a : Any ,_a : Tuple ,_a : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.num_labels
A_ : Optional[int] = NystromformerForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Optional[int] ,_a : List[Any] ,_a : int ,_a : Tuple ,_a : Optional[Any] ,_a : Any ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : List[Any] = NystromformerForTokenClassification(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : int ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : Any ):
'''simple docstring'''
A_ : Optional[int] = self.num_choices
A_ : Tuple = NystromformerForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A_ : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[Any] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
A_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : int ):
'''simple docstring'''
A_ : Any = NystromformerModelTester(self )
A_ : Any = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : List[str] = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def _a ( self : Any ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _a ( self : int ):
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = NystromformerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
A_ : int = model(_a )[0]
A_ : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,_a )
A_ : Tuple = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = """the [MASK] of Belgium is Brussels"""
A_ : List[Any] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : List[Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : Tuple = tokenizer(_a ,return_tensors="""pt""" )
with torch.no_grad():
A_ : Union[str, Any] = model(encoding.input_ids ).logits
A_ : str = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_a ) ,"""capital""" )
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list[int]):
A_ : str = []
if len(lowerCamelCase) == 1:
return [nums.copy()]
for _ in range(len(lowerCamelCase)):
A_ : Optional[Any] = nums.pop(0)
A_ : Tuple = permute(lowerCamelCase)
for perm in permutations:
perm.append(lowerCamelCase)
result.extend(lowerCamelCase)
nums.append(lowerCamelCase)
return result
def lowerCamelCase ( lowerCamelCase : Dict):
def backtrack(lowerCamelCase : Tuple):
if start == len(lowerCamelCase) - 1:
output.append(nums[:])
else:
for i in range(lowerCamelCase , len(lowerCamelCase)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack: undo the swap
A_ : List[str] = []
backtrack(0)
return output
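# Hedged usage example (illustrative; the obfuscated function name above is left
# as-is): with the in-place swap restored, the backtracking version enumerates all
# n! orderings, e.g. for [1, 2, 3]:
#   [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]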
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__magic_name__ = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 27 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
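# Minimal usage sketch (assumes a PIL image `img`; the checkpoint name is just
# an example of a real ViLT checkpoint on the Hub):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(img, "How many cats are there?", return_tensors="pt")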
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
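# Usage sketch for the config class above (DebertaV2Config in Transformers;
# the values are illustrative, not tied to a released checkpoint):
#   config = DebertaV2Config(hidden_size=1536, num_hidden_layers=24)
#   config.model_type  # "deberta-v2"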
| 27 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
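# Wiring sketch (assumes an `output_dir` and a LightningModule defined elsewhere):
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )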
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
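# Note: the ".eZt8xd" selector matches Google's result markup at the time of
# writing and can break without notice; re-inspect the page if no links match.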
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__magic_name__ = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 0 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-ended linear search: check both ends, then recurse inward."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
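# Worked examples for the two-ended linear search above:
#   search([1, 3, 5, 7, 9], 7)  -> 3    (matched while scanning from the right)
#   search([1, 3, 5, 7, 9], 4)  -> -1   (pointers cross without a match)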
| 709 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
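# depth_first_search returns the *set* of vertices reachable from the start
# node, so iteration order over the result is not the visit order.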
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
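# Example: odd_even_sort([5, 1, 4, 2, 8]) -> [1, 2, 4, 5, 8]. The alternating
# even/odd passes are easy to parallelise, but the sort is still O(n^2) overall.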
if __name__ == "__main__":
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
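        # The original checkpoint stores query/key/value as one fused "qkv"
        # projection; the slices below split it into equal thirds for the
        # separate q/k/v weights expected by the HF attention implementation.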
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
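# Example invocation (the script filename and paths are placeholders):
#   python convert_mobilevit.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./dump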
| 27 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """"""
a_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a_ = None # compression type in fsspec. ex: "gzip"
a_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : int ,_a : str = "" ,_a : Optional[str] = None ,_a : Optional[dict] = None ,**_a : Optional[Any] ):
'''simple docstring'''
super().__init__(self ,**_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
A_ : Dict = fsspec.open(
_a ,mode="""rb""" ,protocol=_a ,compression=self.compression ,client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
A_ : Dict = os.path.basename(self.file.path.split("""::""" )[0] )
A_ : Dict = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
A_ : int = None
@classmethod
def _a ( cls : List[str] ,_a : str ):
'''simple docstring'''
return super()._strip_protocol(_a ).lstrip("""/""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
if self.dir_cache is None:
A_ : Optional[Any] = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
A_ : Optional[Any] = {f["""name"""]: f}
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.file.open().read()
def _a ( self : Optional[int] ,_a : str ,_a : str = "rb" ,_a : Any=None ,_a : str=True ,_a : int=None ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = self._strip_protocol(_a )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """bz2"""
a_ = """bz2"""
a_ = """.bz2"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gzip"""
a_ = """gzip"""
a_ = """.gz"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """lz4"""
a_ = """lz4"""
a_ = """.lz4"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """xz"""
a_ = """xz"""
a_ = """.xz"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """zstd"""
a_ = """zstd"""
a_ = """.zst"""
def __init__( self : Union[str, Any] ,_a : str ,_a : str = "rb" ,_a : Optional[str] = None ,_a : Optional[dict] = None ,_a : int = DEFAULT_BLOCK_SIZE ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(
fo=_a ,mode=_a ,target_protocol=_a ,target_options=_a ,block_size=_a ,**_a ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
A_ : Optional[Any] = self.file.__enter__
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
A_ : Tuple = file_
def __enter__( self : Tuple ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] ,*_a : Tuple ,**_a : List[str] ):
'''simple docstring'''
self._file.__exit__(*_a ,**_a )
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._file )
def _a ( self : Dict ):
'''simple docstring'''
return next(self._file )
def __getattr__( self : str ,_a : Tuple ):
'''simple docstring'''
return getattr(self._file ,_a )
def fixed_enter(*_a : Dict ,**_a : Dict ):
return WrappedFile(_enter(*_a ,**_a ) )
A_ : List[str] = fixed_enter
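# Usage sketch, mirroring how the datasets test-suite exercises these classes
# (the path is a placeholder):
#   fs = fsspec.filesystem("gzip", fo="./file.txt.gz")
#   fs.glob("*")                       # ["file.txt"] -- compression suffix stripped
#   data = fs.open("file.txt").read()  # transparently decompressed bytes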
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
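# Usage sketch (the class above corresponds to Transformers' CLIPImageProcessor;
# `img` is assumed to be a PIL image):
#   processor = CLIPImageProcessor()
#   batch = processor(images=img, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above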
| 27 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats shaped like `shape`."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
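# Note: `global_rng` is deliberately unseeded, so generated inputs differ from
# one test session to the next; a test that needs determinism can pass its own
# random.Random(seed) via the rng= argument above.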
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Dict ,_a : Tuple=7 ,_a : Tuple=400 ,_a : Any=2000 ,_a : Union[str, Any]=10 ,_a : Tuple=160 ,_a : Optional[int]=8 ,_a : Optional[Any]=0.0 ,_a : Optional[Any]=4000 ,_a : List[Any]=False ,_a : Tuple=True ,):
'''simple docstring'''
A_ : str = parent
A_ : Any = batch_size
A_ : Tuple = min_seq_length
A_ : str = max_seq_length
A_ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A_ : int = padding_value
A_ : str = sampling_rate
A_ : List[str] = return_attention_mask
A_ : Union[str, Any] = do_normalize
A_ : Dict = feature_size
A_ : int = chunk_length
A_ : Union[str, Any] = hop_length
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self : Optional[Any] ,_a : Dict=False ,_a : List[Any]=False ):
'''simple docstring'''
def _flatten(_a : Dict ):
return list(itertools.chain(*_a ) )
if equal_length:
A_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A_ : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
A_ : Optional[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self : str ):
'''simple docstring'''
A_ : str = WhisperFeatureExtractionTester(self )
def _a ( self : str ):
'''simple docstring'''
A_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Dict = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
A_ : Optional[Any] = self.feature_extraction_class.from_pretrained(_a )
A_ : Any = feat_extract_first.to_dict()
A_ : Any = feat_extract_second.to_dict()
A_ : str = feat_extract_first.mel_filters
A_ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a ,_a ) )
self.assertEqual(_a ,_a )
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[int] = os.path.join(_a ,"""feat_extract.json""" )
feat_extract_first.to_json_file(_a )
A_ : str = self.feature_extraction_class.from_json_file(_a )
A_ : Any = feat_extract_first.to_dict()
A_ : List[str] = feat_extract_second.to_dict()
A_ : Any = feat_extract_first.mel_filters
A_ : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a ,_a ) )
self.assertEqual(_a ,_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
A_ : str = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
A_ : List[Any] = feature_extractor(_a ,padding="""max_length""" ,return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A_ : List[Any] = feature_extractor(speech_inputs[0] ,return_tensors="""np""" ).input_features
A_ : List[str] = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test batched
A_ : List[str] = feature_extractor(_a ,return_tensors="""np""" ).input_features
A_ : str = feature_extractor(_a ,return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A_ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A_ : Union[str, Any] = np.asarray(_a )
A_ : Dict = feature_extractor(_a ,return_tensors="""np""" ).input_features
A_ : Dict = feature_extractor(_a ,return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test truncation required
A_ : Tuple = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )]
A_ : int = [np.asarray(_a ) for speech_input in speech_inputs]
A_ : List[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
A_ : Any = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
A_ : Any = feature_extractor(_a ,return_tensors="""np""" ).input_features
A_ : List[str] = feature_extractor(_a ,return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
import torch
A_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Dict = np.random.rand(100 ,32 ).astype(np.floataa )
A_ : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A_ : Union[str, Any] = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A_ : Dict = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Union[str, Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
A_ : List[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
A_ : Union[str, Any] = self._load_datasamples(1 )
A_ : int = WhisperFeatureExtractor()
A_ : Dict = feature_extractor(_a ,return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape ,(1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] ,_a ,atol=1e-4 ) )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Tuple = self._load_datasamples(1 )[0]
A_ : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
A_ : str = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 27 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
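# Offset bookkeeping: each patch-embedding layer encountered shifts the block
# numbering of everything after it, so replace_key_with_offset subtracts
# patch_emb_offset when renumbering keys into the HF "block.N" layout.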
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict):
A_ : List[str] = PoolFormerConfig()
# set attributes based on model_name
A_ : Tuple = """huggingface/label-files"""
A_ : List[Any] = model_name[-3:]
A_ : int = 1000
A_ : Tuple = """imagenet-1k-id2label.json"""
A_ : str = (1, 1000)
# set config attributes
A_ : Tuple = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : List[str] = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Tuple = idalabel
A_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ : List[str] = [2, 2, 6, 2]
A_ : str = [64, 128, 320, 512]
A_ : Optional[Any] = 4.0
A_ : Tuple = 0.9
elif size == "s24":
A_ : str = [4, 4, 12, 4]
A_ : Any = [64, 128, 320, 512]
A_ : Any = 4.0
A_ : Union[str, Any] = 0.9
elif size == "s36":
A_ : Optional[int] = [6, 6, 18, 6]
A_ : List[Any] = [64, 128, 320, 512]
A_ : Optional[Any] = 4.0
A_ : str = 1E-6
A_ : Optional[int] = 0.9
elif size == "m36":
A_ : Optional[int] = [6, 6, 18, 6]
A_ : Optional[int] = [96, 192, 384, 768]
A_ : List[Any] = 4.0
A_ : List[str] = 1E-6
A_ : List[Any] = 0.95
elif size == "m48":
A_ : Optional[int] = [8, 8, 24, 8]
A_ : Optional[int] = [96, 192, 384, 768]
A_ : Dict = 4.0
A_ : List[str] = 1E-6
A_ : Tuple = 0.95
else:
raise ValueError(F'Size {size} not supported')
# load image processor
A_ : str = PoolFormerImageProcessor(crop_pct=lowerCamelCase)
# Prepare image
A_ : Union[str, Any] = prepare_img()
A_ : Tuple = image_processor(images=lowerCamelCase , return_tensors="""pt""").pixel_values
logger.info(F'Converting model {model_name}...')
# load original state dict
A_ : Optional[int] = torch.load(lowerCamelCase , map_location=torch.device("""cpu"""))
# rename keys
A_ : Union[str, Any] = rename_keys(lowerCamelCase)
# create HuggingFace model and load state dict
A_ : Optional[int] = PoolFormerForImageClassification(lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
# Define image processor
A_ : int = PoolFormerImageProcessor(crop_pct=lowerCamelCase)
A_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
# forward pass
A_ : str = model(lowerCamelCase)
A_ : Optional[int] = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ : List[str] = torch.tensor([-0.3045, -0.6758, -0.4869])
elif size == "s24":
A_ : Optional[int] = torch.tensor([0.4402, -0.1374, -0.8045])
elif size == "s36":
A_ : Tuple = torch.tensor([-0.6080, -0.5133, -0.5898])
elif size == "m36":
A_ : str = torch.tensor([0.3952, 0.2263, -1.2668])
elif size == "m48":
A_ : Union[str, Any] = torch.tensor([0.1167, -0.0656, -0.3423])
else:
raise ValueError(F'Size {size} not supported')
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-2)
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
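    # Hedged sanity check (added for illustration; not part of the original
    # script): substitute the computed root of x**2 - 5 back into the
    # expression and confirm the residual is ~0.
    from sympy import sympify

    root = newton_raphson('x**2 - 5', 2)
    print(f"""Residual at the root of x**2 - 5: {float(sympify('x**2 - 5').subs(symbols('x'), root)):.2e}""")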
| 27 | 0 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
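

# Hedged usage sketch (added for illustration; not part of the original test
# module): reading a gzip-compressed text file through fsspec, mirroring what
# the compression-filesystem tests above assert. The path is supplied by the
# caller.
def read_gzipped_text(path: str) -> str:
    with fsspec.open(path, "rt", compression="gzip") as f:
        return f.read()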
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
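

# Hedged wiring sketch (added for illustration; not part of the original
# script): assembling the pieces defined above. `tokenizer` is assumed to be
# any object exposing an `encode` method, e.g. a transformers tokenizer.
def build_dataloader(data_path, tokenizer, batch_size: int = 8):
    from torch.utils.data import DataLoader

    dataset = JsonlDataset(data_path, tokenizer, get_image_transforms(), get_mmimdb_labels(), 512)
    return DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)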
| 27 | 0 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    # Return the product of the digits in the string s.
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    # Slide a 13-digit window across n, extending it while the incoming digit
    # is at least as large as the dropped one, and track the largest product.
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
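

# Hedged cross-check (added for illustration; not part of the original
# solution): a brute-force scan over every 13-digit window, useful for
# sanity-checking the sliding-window shortcut in solution().
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))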
if __name__ == "__main__":
print(f"""{solution() = }""")
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
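    # Hedged sanity check (added for illustration): the sieve should agree
    # with simple trial division for a small bound.
    assert prime_sieve(50) == [n for n in range(2, 51) if all(n % d for d in range(2, int(math.sqrt(n)) + 1))]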
| 27 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"
    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
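

# Hedged usage sketch (added for illustration; not part of the original
# module): build a default config and read back a couple of fields.
def _example_config() -> "RoFormerConfig":
    config = RoFormerConfig(rotary_value=True)
    assert config.model_type == "roformer" and config.hidden_size == 768
    return config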
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
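

# Hedged mini-example (added for clarity; not part of the pipeline): the
# offset-mapping masking above keeps character offsets only for tokens whose
# sequence id marks the context segment.
def _mask_offsets_example():
    sequence_ids = [None, 0, 0, None, 1, 1, None]
    offsets = [(0, 0), (0, 3), (4, 9), (0, 0), (0, 5), (6, 11), (0, 0)]
    context_index = 1
    # -> [None, None, None, None, (0, 5), (6, 11), None]
    return [(o if sequence_ids[k] == context_index else None) for k, o in enumerate(offsets)]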
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board])
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
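
    # Hedged illustration (added for clarity; not part of the original
    # solver): two squares share a 45º diagonal iff row - col matches, and a
    # 135º diagonal iff row + col matches.
    assert 0 - 1 == 1 - 2  # (0, 1) and (1, 2) lie on the same 45º diagonal
    assert 0 + 3 == 1 + 2  # (0, 3) and (1, 2) lie on the same 135º diagonal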
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
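
# Hedged illustration (added; not part of the original module): a minimal
# stand-in for the lazy-import pattern used above -- attribute access triggers
# the real import the first time it happens.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)  # _TinyLazyModule("demo", {"sqrt": "math"}).sqrt(9.0) == 3.0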
| 27 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
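

# Hedged mini-example (added for clarity; not part of the original class): the
# "shortest_edge" resize rule maps the shorter image side to the target size
# and scales the other side to keep the aspect ratio; rounding may differ
# slightly from get_resize_output_image_size.
def _shortest_edge_size(height: int, width: int, shortest_edge: int):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)  # e.g. (480, 640, 224) -> (224, 299)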
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"
    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
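

# Hedged note (added for illustration; not part of the original module): the
# default logit_scale_init_value of 2.6592 is ln(1 / 0.07), i.e. image-text
# similarity logits start out scaled by a temperature of roughly 0.07.
def _logit_scale_example() -> float:
    import math

    return math.exp(2.6592)  # ~14.28, i.e. ~1 / 0.07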
| 27 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        unk_token='<unk>',
        pad_token='<pad>',
        sep_token='<::::>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
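

# Hedged string-level analogue (added for clarity; not part of the original
# tokenizer) of convert_tokens_to_string above: buffered pieces are flushed
# whenever a special token is encountered.
def _flush_on_specials(tokens, specials):
    out, buf = "", []
    for tok in tokens:
        if tok in specials:
            out += " ".join(buf) + tok
            buf = []
        else:
            buf.append(tok)
    return (out + " ".join(buf)).strip()  # (["a", "b", "<s>", "c"], {"<s>"}) -> "a b<s>c"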
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'is_longer']
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
                A_ : Tuple = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
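# --- Usage sketch (an assumption: the extractor above mirrors transformers'
# public ClapFeatureExtractor, so that name and its defaults are used here) ---
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
audio = np.random.randn(12 * 48_000).astype(np.float32)  # ~12 s of mono audio at 48 kHz
features = extractor(audio, sampling_rate=48_000, truncation="fusion", return_tensors="np")
# In "fusion" mode each clip yields four stacked mel views; `is_longer` flags
# clips whose raw length exceeded max_length before truncation.
print(features["input_features"].shape, features["is_longer"])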
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : Dict ,_a : Any=2 ,_a : Dict=8 ,_a : str=True ,_a : List[Any]=True ,_a : int=True ,_a : Union[str, Any]=True ,_a : List[str]=99 ,_a : Any=16 ,_a : Optional[Any]=5 ,_a : Any=2 ,_a : List[Any]=36 ,_a : Any="gelu" ,_a : str=0.0 ,_a : List[str]=0.0 ,_a : Union[str, Any]=512 ,_a : Dict=16 ,_a : str=2 ,_a : Any=0.02 ,_a : Union[str, Any]=3 ,_a : List[str]=4 ,_a : Optional[int]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Optional[Any] = seq_length
A_ : List[str] = is_training
A_ : Optional[int] = use_input_mask
A_ : List[str] = use_token_type_ids
A_ : Optional[Any] = use_labels
A_ : int = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : int = hidden_act
A_ : str = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : Optional[int] = num_labels
A_ : Dict = num_choices
A_ : str = scope
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[Any] = None
if self.use_token_type_ids:
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Dict = None
A_ : List[str] = None
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_config()
A_ : Dict = 300
return config
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ : Optional[int] = True
A_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a ( self : Tuple ,_a : str ,_a : Optional[int] ,_a : List[Any] ,_a : Optional[int] ,_a : str ,_a : str ,_a : List[Any] ):
'''simple docstring'''
A_ : List[Any] = MraModel(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a )
A_ : List[str] = model(_a ,token_type_ids=_a )
A_ : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int ,_a : List[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Optional[Any] ,_a : List[str] ,_a : int ,_a : Union[str, Any] ,_a : str ,_a : List[Any] ,):
'''simple docstring'''
A_ : Optional[int] = True
A_ : Any = MraModel(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : str = model(
_a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,)
A_ : Union[str, Any] = model(_a ,attention_mask=_a ,token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] ,_a : int ,_a : Optional[Any] ,_a : int ,_a : Tuple ,_a : Tuple ,_a : Dict ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Optional[Any] ,_a : str ,_a : Optional[Any] ,_a : Tuple ,_a : Union[str, Any] ,_a : int ,_a : List[Any] ,_a : str ):
'''simple docstring'''
A_ : str = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : str ,_a : int ,_a : str ,_a : Optional[int] ,_a : Any ,_a : Optional[Any] ,_a : str ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.num_labels
A_ : Dict = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Any ,_a : Optional[Any] ,_a : Optional[int] ,_a : str ,_a : Optional[int] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = self.num_labels
A_ : Union[str, Any] = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Union[str, Any] ,_a : str ,_a : List[str] ,_a : Tuple ,_a : Optional[Any] ,_a : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
A_ : Any = self.num_choices
A_ : int = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[int] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : str ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
A_ : Optional[Any] = config_and_inputs
A_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = MraModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : List[Any] = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = MraModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason="""MRA does not output attentions""" )
def _a ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Any = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
A_ : Dict = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : Any = model(_a )[0]
A_ : str = torch.Size((1, 256, 768) )
self.assertEqual(output.shape ,_a )
A_ : List[Any] = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
A_ : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : Tuple = model(_a )[0]
A_ : str = 50265
A_ : Dict = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape ,_a )
A_ : Tuple = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
A_ : Optional[int] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
A_ : List[Any] = model(_a )[0]
A_ : List[str] = 50265
A_ : Union[str, Any] = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape ,_a )
A_ : Optional[int] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 0 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 1
@register_to_config
def __init__( self : str ,_a : Dict=2000 ,_a : Optional[Any]=0.1 ,_a : List[str]=20 ,_a : Tuple=1e-3 ):
'''simple docstring'''
A_ : List[str] = None
A_ : Tuple = None
A_ : Any = None
def _a ( self : int ,_a : Optional[int] ,_a : Union[str, torch.device] = None ):
'''simple docstring'''
A_ : int = torch.linspace(1 ,self.config.sampling_eps ,_a ,device=_a )
def _a ( self : int ,_a : str ,_a : Optional[int] ,_a : str ,_a : List[Any]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
A_ : str = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
A_ : Union[str, Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
A_ : Any = std.flatten()
while len(std.shape ) < len(score.shape ):
A_ : List[str] = std.unsqueeze(-1 )
A_ : Dict = -score / std
# compute the drift and diffusion of the reverse-time SDE (one Euler-Maruyama step)
A_ : Dict = -1.0 / len(self.timesteps )
A_ : Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
A_ : Optional[int] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
A_ : str = beta_t.unsqueeze(-1 )
A_ : int = -0.5 * beta_t * x
A_ : List[str] = torch.sqrt(_a )
A_ : Any = drift - diffusion**2 * score
A_ : Optional[int] = x + drift * dt
# add noise
A_ : Optional[Any] = randn_tensor(x.shape ,layout=x.layout ,generator=_a ,device=x.device ,dtype=x.dtype )
A_ : Dict = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
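# Minimal sampling-loop sketch for the VP-SDE scheduler above (an assumption:
# it mirrors diffusers' ScoreSdeVpScheduler, whose public API is set_timesteps()
# and step_pred(); `score_model` below is a hypothetical score network).
import torch

def sample_sde_vp(scheduler, score_model, shape=(1, 3, 32, 32)):
    scheduler.set_timesteps(scheduler.config.num_train_timesteps)
    x = torch.randn(shape)
    for t in scheduler.timesteps:
        score = score_model(x, t)  # hypothetical callable returning a score estimate
        x, x_mean = scheduler.step_pred(score, t, x)
    return x_mean  # the final denoised mean, without the last noise injection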
| 721 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
return baseaa.aaaencode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
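# Round-trip sketch with the standard library (an assumption: `aaaencode` and
# `aaadecode` above correspond to base64.a85encode / base64.a85decode, i.e. Ascii85).
import base64

encoded = base64.a85encode("hello world".encode("utf-8"))
decoded = base64.a85decode(encoded).decode("utf-8")
assert decoded == "hello world"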
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['MobileViTFeatureExtractor']
__magic_name__ = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
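# Quick check of the lazy structure above: importing a config class pulls in only
# the configuration submodule, leaving the heavier modeling files untouched
# (assumption: standard transformers lazy-import behaviour).
from transformers import MobileViTConfig

config = MobileViTConfig()
print(config.model_type)  # "mobilevit"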
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# returns 1 only when every character is a CJK character, e.g. '身高' or '神'; a word like '180' returns 0
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
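# Self-contained sketch of the greedy longest-match above (hypothetical helper
# name; for brevity it omits the CJK check on the start character). Characters
# inside a segmented Chinese word are rewritten as "##" continuation subwords,
# which is exactly what the whole-word-masking reference relies on.
def mark_whole_words(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return list(bert_tokens)
    max_word_len = max(len(w) for w in chinese_word_set)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        matched = False
        for i in range(min(end - start, max_word_len), 1, -1):
            if "".join(tokens[start : start + i]) in chinese_word_set:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens

assert mark_whole_words(["我", "喜", "欢", "北", "京"], {"喜欢", "北京"}) == [
    "我", "喜", "##欢", "北", "##京"
]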
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster on a GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__magic_name__ = 'examples/'
__magic_name__ = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__magic_name__ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
__magic_name__ = 'README.md'
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]):
with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
A_ : int = f.read()
A_ : Union[str, Any] = REPLACE_PATTERNS[pattern]
A_ : Union[str, Any] = replace.replace("""VERSION""" , lowerCamelCase)
A_ : List[Any] = re_pattern.sub(lowerCamelCase , lowerCamelCase)
with open(lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.write(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : str):
for folder, directories, fnames in os.walk(lowerCamelCase):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""")
if "legacy" in directories:
directories.remove("""legacy""")
for fname in fnames:
if fname.endswith(""".py"""):
update_version_in_file(os.path.join(lowerCamelCase , lowerCamelCase) , lowerCamelCase , pattern="""examples""")
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Tuple=False):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCamelCase , lowerCamelCase , lowerCamelCase)
if not patch:
update_version_in_examples(lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[Any] = """🤗 Transformers currently provides the following architectures"""
A_ : Optional[int] = """1. Want to contribute a new model?"""
with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
A_ : List[Any] = f.readlines()
# Find the start of the list.
A_ : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt):
start_index += 1
start_index += 1
A_ : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt):
if lines[index].startswith("""1."""):
A_ : Optional[int] = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.writelines(lowerCamelCase)
def lowerCamelCase ( ):
with open(REPLACE_FILES["""init"""] , """r""") as f:
A_ : Optional[Any] = f.read()
A_ : int = REPLACE_PATTERNS["""init"""][0].search(lowerCamelCase).groups()[0]
return packaging.version.parse(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Dict=False):
A_ : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
if default_version.is_devrelease:
A_ : Optional[int] = default_version.base_version
elif patch:
A_ : int = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
A_ : Union[str, Any] = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
A_ : Tuple = input(F'Which version are you releasing? [{default_version}]')
if len(lowerCamelCase) == 0:
A_ : List[str] = default_version
print(F'Updating version to {version}.')
global_version_update(lowerCamelCase , patch=lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[Any] = get_version()
A_ : Tuple = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
A_ : List[str] = current_version.base_version
# Check with the user we got that right.
A_ : List[Any] = input(F'Which version are we developing now? [{dev_version}]')
if len(lowerCamelCase) == 0:
A_ : Tuple = dev_version
print(F'Updating version to {version}.')
global_version_update(lowerCamelCase)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__magic_name__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
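# Example invocations (illustrative; assumes this script lives at utils/release.py):
#   python utils/release.py                  # bump to the next minor version for a release
#   python utils/release.py --patch          # cut a patch release instead
#   python utils/release.py --post_release   # move the branch back to a .dev0 version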
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
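# Usage sketch (an assumption: the class above mirrors transformers' ViltProcessor;
# "dandelin/vilt-b32-finetuned-vqa" is a public checkpoint).
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "How many cats are there?", return_tensors="pt")
# Tokenizer output (input_ids, attention_mask, ...) is merged with the image
# processor's output (pixel_values, pixel_mask) into a single BatchEncoding.
print(sorted(inputs.keys()))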
| 27 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__magic_name__ = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'num_train_timesteps': 40,
'sigma_min': 0.0_0_2,
'sigma_max': 80.0,
}
__magic_name__ = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 80.0,
}
__magic_name__ = {
'num_train_timesteps': 151,
'sigma_min': 0.0_0_2,
'sigma_max': 80.0,
}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
if isinstance(lowerCamelCase , lowerCamelCase):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Tuple=False):
A_ : Dict = checkpoint[F'{old_prefix}.in_layers.0.weight']
A_ : Optional[int] = checkpoint[F'{old_prefix}.in_layers.0.bias']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.in_layers.2.weight']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.in_layers.2.bias']
A_ : List[str] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
A_ : int = checkpoint[F'{old_prefix}.emb_layers.1.bias']
A_ : int = checkpoint[F'{old_prefix}.out_layers.0.weight']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.out_layers.0.bias']
A_ : Optional[Any] = checkpoint[F'{old_prefix}.out_layers.3.weight']
A_ : Optional[int] = checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
A_ : str = checkpoint[F'{old_prefix}.skip_connection.weight']
A_ : List[str] = checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None):
A_ : Optional[Any] = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0)
A_ : List[Any] = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0)
A_ : List[Any] = checkpoint[F'{old_prefix}.norm.weight']
A_ : Tuple = checkpoint[F'{old_prefix}.norm.bias']
A_ : Union[str, Any] = weight_q.squeeze(-1).squeeze(-1)
A_ : Dict = bias_q.squeeze(-1).squeeze(-1)
A_ : int = weight_k.squeeze(-1).squeeze(-1)
A_ : Tuple = bias_k.squeeze(-1).squeeze(-1)
A_ : Optional[Any] = weight_v.squeeze(-1).squeeze(-1)
A_ : Dict = bias_v.squeeze(-1).squeeze(-1)
A_ : int = (
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
)
A_ : int = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
return new_checkpoint
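# Sanity check for the squeezes above (self-contained): a 1x1 Conv2d kernel of
# shape (out, in, 1, 1) carries exactly the parameters of a Linear weight of
# shape (out, in), so attention projections convert by dropping the two
# trailing singleton dimensions.
import torch

conv_weight = torch.randn(8, 4, 1, 1)
linear_weight = conv_weight.squeeze(-1).squeeze(-1)
assert linear_weight.shape == (8, 4)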
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Tuple):
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
A_ : Optional[Any] = {}
A_ : List[Any] = checkpoint["""time_embed.0.weight"""]
A_ : Dict = checkpoint["""time_embed.0.bias"""]
A_ : Optional[Any] = checkpoint["""time_embed.2.weight"""]
A_ : Any = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
A_ : Union[str, Any] = checkpoint["""label_emb.weight"""]
A_ : Union[str, Any] = checkpoint["""input_blocks.0.0.weight"""]
A_ : List[str] = checkpoint["""input_blocks.0.0.bias"""]
A_ : Optional[int] = unet_config["""down_block_types"""]
A_ : Union[str, Any] = unet_config["""layers_per_block"""]
A_ : Optional[Any] = unet_config["""attention_head_dim"""]
A_ : Dict = unet_config["""block_out_channels"""]
A_ : Union[str, Any] = 1
A_ : Optional[int] = channels_list[0]
for i, layer_type in enumerate(lowerCamelCase):
A_ : Any = channels_list[i]
A_ : Tuple = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCamelCase):
A_ : List[str] = F'down_blocks.{i}.resnets.{j}'
A_ : Optional[int] = F'input_blocks.{current_layer}.0'
A_ : Dict = True if j == 0 and downsample_block_has_skip else False
A_ : Dict = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCamelCase):
A_ : Optional[Any] = F'down_blocks.{i}.resnets.{j}'
A_ : Optional[Any] = F'input_blocks.{current_layer}.0'
A_ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
A_ : List[str] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
A_ : str = F'down_blocks.{i}.attentions.{j}'
A_ : List[str] = F'input_blocks.{current_layer}.1'
A_ : Optional[int] = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : str = F'down_blocks.{i}.downsamplers.0'
A_ : List[str] = F'input_blocks.{current_layer}.0'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
A_ : str = current_channels
# hardcoded the mid-block for now
A_ : Optional[Any] = """mid_block.resnets.0"""
A_ : Dict = """middle_block.0"""
A_ : Any = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : int = """mid_block.attentions.0"""
A_ : Dict = """middle_block.1"""
A_ : Optional[int] = convert_attention(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : List[Any] = """mid_block.resnets.1"""
A_ : Dict = """middle_block.2"""
A_ : Optional[int] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : Optional[Any] = 0
A_ : int = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowerCamelCase):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1):
A_ : Tuple = F'up_blocks.{i}.resnets.{j}'
A_ : str = F'output_blocks.{current_layer}.0'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : int = F'up_blocks.{i}.upsamplers.0'
A_ : Tuple = F'output_blocks.{current_layer-1}.1'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1):
A_ : Tuple = F'up_blocks.{i}.resnets.{j}'
A_ : Union[str, Any] = F'output_blocks.{current_layer}.0'
A_ : Union[str, Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
A_ : List[str] = F'up_blocks.{i}.attentions.{j}'
A_ : Optional[Any] = F'output_blocks.{current_layer}.1'
A_ : int = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : str = F'up_blocks.{i}.upsamplers.0'
A_ : List[Any] = F'output_blocks.{current_layer-1}.2'
A_ : Dict = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : str = checkpoint["""out.0.weight"""]
A_ : Dict = checkpoint["""out.0.bias"""]
A_ : Optional[Any] = checkpoint["""out.2.weight"""]
A_ : int = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__magic_name__ = parser.parse_args()
__magic_name__ = strabool(args.class_cond)
__magic_name__ = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__magic_name__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__magic_name__ = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__magic_name__ = None
__magic_name__ = con_pt_to_diffuser(args.unet_path, unet_config)
__magic_name__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__magic_name__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__magic_name__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
__magic_name__ = CMStochasticIterativeScheduler(**scheduler_config)
__magic_name__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
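# Example invocation (illustrative; the script filename and checkpoint path are
# hypothetical, but the flags match the argparse definition above):
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True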
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
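# Self-contained sketch of the dummy-object pattern above (names are
# illustrative, not the real diffusers helpers): touching a class whose
# backends are missing fails fast with an actionable ImportError.
class _MissingBackendMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends: {', '.join(cls._backends)}")

class FakeSdeScheduler(metaclass=_MissingBackendMeta):
    _backends = ("torch", "torchsde")

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends: {', '.join(self._backends)}")

try:
    FakeSdeScheduler()
except ImportError as err:
    print(err)  # FakeSdeScheduler requires the backends: torch, torchsde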
| 27 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """Pix2StructImageProcessor"""
a_ = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Union[str, Any] ,_a : List[str] ,_a : int ):
'''simple docstring'''
A_ : Tuple = False
super().__init__(_a ,_a )
def __call__( self : Tuple ,_a : Tuple=None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : Optional[int] = 2048 ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Dict ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
A_ : str = self.tokenizer
A_ : List[Any] = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
A_ : Optional[int] = self.image_processor(
_a ,return_tensors=_a ,max_patches=_a ,**_a )
else:
# add pixel_values and bbox
A_ : List[str] = self.image_processor(
_a ,return_tensors=_a ,max_patches=_a ,header_text=_a ,**_a )
if text is not None and not self.image_processor.is_vqa:
A_ : Optional[Any] = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
if "attention_mask" in text_encoding:
A_ : Union[str, Any] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
A_ : Tuple = text_encoding.pop("""input_ids""" )
else:
A_ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def _a ( self : Optional[int] ,*_a : List[str] ,**_a : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : Tuple ,*_a : Optional[int] ,**_a : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
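# Usage sketch (an assumption: the class above mirrors transformers'
# Pix2StructProcessor; "google/pix2struct-textcaps-base" is a public checkpoint).
import requests
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
# Images become flattened patches plus an attention mask; for non-VQA
# checkpoints any text passed alongside is tokenized into decoder labels.
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'flattened_patches']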
| 703 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
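# Worked example of the column pruning above (self-contained): columns that are
# padding everywhere in the batch are dropped before the forward pass.
import torch

pad_id = 0
batch_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
keep = batch_ids.ne(pad_id).any(dim=0)  # tensor([True, True, False, False])
print(batch_ids[:, keep])               # tensor([[5, 6], [7, 0]])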
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
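# SQuAD-style token F1: precision and recall are computed over the multiset
# intersection of normalized prediction and ground-truth tokens.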
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 0 |
'''simple docstring'''
import string
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[str] = """"""
for i in sequence:
A_ : Dict = ord(lowerCamelCase)
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = string.ascii_letters
A_ : int = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCamelCase)] if c in letters else c for c in sequence)
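# Both implementations are involutions: applying the cipher twice restores the
# input, e.g. atbash("ABC") -> "ZYX" and atbash("ZYX") -> "ABC".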
def lowerCamelCase ( ):
from timeit import timeit
print("""Running performance benchmarks...""")
A_ : Optional[int] = """from string import printable ; from __main__ import atbash, atbash_slow"""
print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowerCamelCase)} seconds')
print(F'> atbash(): {timeit("atbash(printable)" , setup=lowerCamelCase)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 705 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
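# Fast CPU test: assemble a tiny UNet / DDIM / VQ-VAE stack and compare a 64x64
# output slice against stored reference values.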
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def lowerCamelCase ( lowerCamelCase : Matrix , lowerCamelCase : Matrix):
A_ : int = len(lowerCamelCase)
A_ : Matrix = [[0 for _ in range(size + 1)] for _ in range(lowerCamelCase)]
A_ : int
A_ : int
A_ : int
A_ : int
A_ : int
A_ : float
for row in range(lowerCamelCase):
for col in range(lowerCamelCase):
A_ : List[Any] = matrix[row][col]
A_ : Union[str, Any] = vector[row][0]
A_ : str = 0
A_ : Dict = 0
while row < size and col < size:
# pivoting
A_ : Tuple = max((abs(augmented[rowa][col]), rowa) for rowa in range(lowerCamelCase , lowerCamelCase))[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A_ : List[str] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowerCamelCase):
A_ : List[str] = augmented[rowa][col] / augmented[row][col]
A_ : str = 0
for cola in range(col + 1 , size + 1):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowerCamelCase):
for row in range(lowerCamelCase):
A_ : Union[str, Any] = augmented[row][col] / augmented[col][col]
for cola in range(lowerCamelCase , size + 1):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10)] for row in range(lowerCamelCase)
]
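# Fit the unique degree-(size - 1) polynomial through the points
# (1, y_1), ..., (size, y_size) by solving the Vandermonde system above.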
def lowerCamelCase ( lowerCamelCase : list[int]):
A_ : int = len(lowerCamelCase)
A_ : Matrix = [[0 for _ in range(lowerCamelCase)] for _ in range(lowerCamelCase)]
A_ : Matrix = [[0] for _ in range(lowerCamelCase)]
A_ : Matrix
A_ : int
A_ : int
A_ : int
for x_val, y_val in enumerate(lowerCamelCase):
for col in range(lowerCamelCase):
A_ : str = (x_val + 1) ** (size - col - 1)
A_ : Tuple = y_val
A_ : Tuple = solve(lowerCamelCase , lowerCamelCase)
def interpolated_func(lowerCamelCase : int) -> int:
return sum(
round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
for x_val in range(lowerCamelCase))
return interpolated_func
def lowerCamelCase ( lowerCamelCase : int):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase ( lowerCamelCase : Callable[[int], int] = question_function , lowerCamelCase : int = 10):
A_ : list[int] = [func(lowerCamelCase) for x_val in range(1 , order + 1)]
A_ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff]) for max_coeff in range(1 , order + 1)
]
A_ : int = 0
A_ : Callable[[int], int]
A_ : int
for poly in polynomials:
A_ : int = 1
while func(lowerCamelCase) == poly(lowerCamelCase):
x_val += 1
ret += poly(lowerCamelCase)
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 706 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( OnnxConfig ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 27 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict ,_a : int ,_a : Tuple=13 ,_a : str=7 ,_a : str=True ,_a : Optional[Any]=True ,_a : List[Any]=True ,_a : Optional[Any]=True ,_a : List[Any]=99 ,_a : Union[str, Any]=32 ,_a : Optional[int]=5 ,_a : Optional[int]=4 ,_a : Union[str, Any]=37 ,_a : List[str]="gelu" ,_a : Tuple=0.1 ,_a : str=0.1 ,_a : Optional[int]=512 ,_a : Union[str, Any]=16 ,_a : Dict=2 ,_a : Any=0.02 ,_a : List[Any]=4 ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : Any = batch_size
A_ : str = seq_length
A_ : Union[str, Any] = is_training
A_ : Any = use_attention_mask
A_ : str = use_token_type_ids
A_ : Dict = use_labels
A_ : List[str] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : str = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : str = initializer_range
A_ : int = num_choices
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : Dict = None
if self.use_attention_mask:
A_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : List[Any] = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
A_ : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ = True
a_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = FlaxRoFormerModelTester(self )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
A_ : Optional[int] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" ,from_pt=_a )
A_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A_ : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
A_ : List[Any] = model(_a )[0]
A_ : int = 50000
A_ : str = (1, 6, vocab_size)
self.assertEqual(output.shape ,_a )
A_ : Any = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
| 707 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : Tuple = 0
A_ : Union[str, Any] = len(lowerCamelCase)
for i in range(n - 1):
for j in range(i + 1 , lowerCamelCase):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
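# O(n log n) divide-and-conquer count: sort both halves recursively and add the
# cross inversions found while merging, versus the O(n^2) scan above.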
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
if len(lowerCamelCase) <= 1:
return arr, 0
A_ : Optional[Any] = len(lowerCamelCase) // 2
A_ : List[Any] = arr[0:mid]
A_ : Tuple = arr[mid:]
A_ : str = count_inversions_recursive(lowerCamelCase)
A_ : str = count_inversions_recursive(lowerCamelCase)
A_ : Any = _count_cross_inversions(lowerCamelCase , lowerCamelCase)
A_ : List[str] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : str):
A_ : Optional[int] = []
A_ : Union[str, Any] = 0
while i < len(lowerCamelCase) and j < len(lowerCamelCase):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowerCamelCase) - i
r.append(q[j])
j += 1
else:
r.append(p[i])
i += 1
if i < len(lowerCamelCase):
r.extend(p[i:])
else:
r.extend(q[j:])
return r, num_inversion
def lowerCamelCase ( ):
A_ : str = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A_ : Tuple = count_inversions_bf(lowerCamelCase)
A_ : Union[str, Any] = count_inversions_recursive(lowerCamelCase)
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , lowerCamelCase)
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A_ : str = count_inversions_bf(lowerCamelCase)
A_ : str = count_inversions_recursive(lowerCamelCase)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , lowerCamelCase)
# an empty list should also have zero inversions
A_ : Any = []
A_ : List[str] = count_inversions_bf(lowerCamelCase)
A_ : Tuple = count_inversions_recursive(lowerCamelCase)
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , lowerCamelCase)
if __name__ == "__main__":
main()
| 708 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
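# Fast tests below build tiny components; the RobertaSeries text encoder is
# re-created per test (see the TODOs) to work around its non-determinism.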
class __lowerCAmelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ = AltDiffusionPipeline
a_ = TEXT_TO_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_BATCH_PARAMS
a_ = TEXT_TO_IMAGE_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
A_ : Dict = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_a ,set_alpha_to_one=_a ,)
torch.manual_seed(0 )
A_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
A_ : str = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
A_ : int = CLIPTextModel(_a )
A_ : Tuple = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A_ : Union[str, Any] = 77
A_ : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self : List[str] ,_a : str ,_a : List[str]=0 ):
'''simple docstring'''
if str(_a ).startswith("""mps""" ):
A_ : Dict = torch.manual_seed(_a )
else:
A_ : List[str] = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Union[str, Any] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _a ( self : Any ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Tuple = self.get_dummy_components()
torch.manual_seed(0 )
A_ : Tuple = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
A_ : Union[str, Any] = RobertaSeriesModelWithTransformation(_a )
A_ : Optional[int] = text_encoder
A_ : str = AltDiffusionPipeline(**_a )
A_ : Any = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
A_ : Any = self.get_dummy_inputs(_a )
A_ : int = """A photo of an astronaut"""
A_ : int = alt_pipe(**_a )
A_ : Tuple = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : List[Any] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
A_ : str = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
A_ : List[str] = RobertaSeriesModelWithTransformation(_a )
A_ : List[Any] = text_encoder
A_ : Any = AltDiffusionPipeline(**_a )
A_ : Optional[Any] = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
A_ : Any = self.get_dummy_inputs(_a )
A_ : Tuple = alt_pipe(**_a )
A_ : int = output.images
A_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Any = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=_a )
A_ : Any = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
A_ : List[str] = """A painting of a squirrel eating a burger"""
A_ : List[str] = torch.manual_seed(0 )
A_ : Dict = alt_pipe([prompt] ,generator=_a ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
A_ : Optional[Any] = output.images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : List[str] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
A_ : str = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=_a ,safety_checker=_a )
A_ : Dict = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = """A painting of a squirrel eating a burger"""
A_ : Dict = torch.manual_seed(0 )
A_ : int = alt_pipe([prompt] ,generator=_a ,num_inference_steps=2 ,output_type="""numpy""" )
A_ : Any = output.images
A_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Dict = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 709 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
if density <= 0:
raise ValueError("""Impossible fluid density""")
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""")
return (bulk_modulus / density) ** 0.5
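# Example: water (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa) gives
# (2.15e9 / 998) ** 0.5 ~= 1468 m/s.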
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
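# Remap every checkpoint key to the HF naming scheme and split fused qkv
# projection weights into separate query/key/value tensors.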
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCamelCase)
if number < 1:
A_ : int = F'Input value of [number={number}] must be > 0'
raise ValueError(lowerCamelCase)
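    # Build the result iteratively using the Catalan recurrence
    # C(i) = C(i - 1) * (4 * i - 2) / (i + 1), with C(1) = 1.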
A_ : Optional[Any] = 1
for i in range(1 , lowerCamelCase):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
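    # Full preprocessing pipeline: optional RGB conversion, resize, center crop,
    # rescale and normalize, returned as a BatchFeature of pixel values.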
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( OwlViTImageProcessor ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """char"""
a_ = """bpe"""
a_ = """wp"""
__magic_name__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """char_tokenizer"""]
a_ = """ViTImageProcessor"""
a_ = """MgpstrTokenizer"""
def __init__( self : Dict ,_a : Optional[int]=None ,_a : Dict=None ,**_a : Optional[int] ):
'''simple docstring'''
A_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : Optional[int] = kwargs.pop("""feature_extractor""" )
A_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
A_ : Union[str, Any] = tokenizer
A_ : List[Any] = AutoTokenizer.from_pretrained("""gpt2""" )
A_ : str = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(_a ,_a )
def __call__( self : Optional[int] ,_a : str=None ,_a : int=None ,_a : int=None ,**_a : Any ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
A_ : int = self.image_processor(_a ,return_tensors=_a ,**_a )
if text is not None:
A_ : Tuple = self.char_tokenizer(_a ,return_tensors=_a ,**_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
A_ : int = encodings["""input_ids"""]
return inputs
def _a ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
        A_ , A_ , A_ : List[str] = sequences
A_ : List[str] = char_preds.size(0 )
A_ : Dict = self._decode_helper(_a ,"""char""" )
A_ : List[str] = self._decode_helper(_a ,"""bpe""" )
A_ : Union[str, Any] = self._decode_helper(_a ,"""wp""" )
A_ : str = []
A_ : Any = []
for i in range(_a ):
A_ : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
A_ : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
A_ : Any = scores.index(max(_a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A_ : Optional[int] = {}
A_ : Dict = final_strs
A_ : Dict = final_scores
A_ : List[Any] = char_strs
A_ : Optional[Any] = bpe_strs
A_ : Optional[int] = wp_strs
return out
def _a ( self : List[str] ,_a : Dict ,_a : str ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
A_ : Dict = self.char_decode
A_ : int = 1
A_ : List[str] = """[s]"""
elif format == DecodeType.BPE:
A_ : Optional[int] = self.bpe_decode
A_ : List[Any] = 2
A_ : Tuple = """#"""
elif format == DecodeType.WORDPIECE:
A_ : int = self.wp_decode
A_ : Optional[Any] = 102
A_ : List[str] = """[SEP]"""
else:
raise ValueError(f'Format {format} is not supported.' )
        A_ , A_ : Any = [], []
        A_ : Tuple = pred_logits.size(0 )
        A_ : Union[str, Any] = pred_logits.size(1 )
        A_ , A_ : List[str] = pred_logits.topk(1 ,dim=-1 ,largest=_a ,sorted=_a )
        A_ : Optional[int] = preds_index.view(-1 ,_a )[:, 1:]
        A_ : Dict = decoder(_a )
        A_ , A_ : Tuple = torch.nn.functional.softmax(_a ,dim=2 ).max(dim=2 )
        A_ : str = preds_max_prob[:, 1:]
for index in range(_a ):
A_ : List[str] = preds_str[index].find(_a )
A_ : int = preds_str[index][:pred_eos]
A_ : Union[str, Any] = preds_index[index].cpu().tolist()
A_ : Union[str, Any] = pred_index.index(_a ) if eos_token in pred_index else -1
A_ : Any = preds_max_prob[index][: pred_eos_index + 1]
A_ : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_a )
conf_scores.append(_a )
return dec_strs, conf_scores
def _a ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
A_ : str = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(_a )]
return decode_strs
def _a ( self : Union[str, Any] ,_a : Optional[Any] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(_a )
def _a ( self : int ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(_a )]
return decode_strs
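# Hedged sketch (hypothetical inputs) of the selection rule batch_decode applies
# above: each head (char/bpe/wp) proposes a string with a confidence score, and the
# most confident proposal wins per sample.
def pick_best(candidates):
    return max(candidates, key=lambda pair: pair[1])[0]

assert pick_best([("ticket", 0.91), ("ticke#", 0.40), ("t1cket", 0.55)]) == "ticket"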
| 713 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("""Could not find root""") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
__magic_name__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
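# Worked examples for the helper above: each dimension is rounded up to a multiple
# of scale_factor**2 and then divided by scale_factor, so the result always matches
# the movq decoder's upsampling grid.
assert downscale_height_and_width(768, 768) == (96, 96)   # exact multiple of 64
assert downscale_height_and_width(765, 768) == (96, 96)   # 765 rounds up to 768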
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ,_a : UNetaDConditionModel ,_a : DDPMScheduler ,_a : VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_a ,scheduler=_a ,movq=_a ,)
A_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _a ( self : Dict ,_a : Any ,_a : List[Any] ,_a : List[Any] ,_a : Union[str, Any] ,_a : List[str] ,_a : Optional[int] ):
'''simple docstring'''
if latents is None:
A_ : Dict = randn_tensor(_a ,generator=_a ,device=_a ,dtype=_a )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
A_ : Dict = latents.to(_a )
A_ : int = latents * scheduler.init_noise_sigma
return latents
def _a ( self : str ,_a : Dict=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A_ : Tuple = torch.device(f'cuda:{gpu_id}' )
A_ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a ,_a )
def _a ( self : int ,_a : List[Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
A_ : List[Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
A_ : Union[str, Any] = cpu_offload_with_hook(_a ,_a ,prev_module_hook=_a )
# We'll offload the last model manually.
A_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : str ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 100 ,_a : float = 4.0 ,_a : int = 1 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,):
'''simple docstring'''
A_ : Union[str, Any] = self._execution_device
A_ : str = guidance_scale > 1.0
if isinstance(_a ,_a ):
A_ : str = torch.cat(_a ,dim=0 )
A_ : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_a ,_a ):
A_ : Dict = torch.cat(_a ,dim=0 )
if do_classifier_free_guidance:
A_ : str = image_embeds.repeat_interleave(_a ,dim=0 )
A_ : Union[str, Any] = negative_image_embeds.repeat_interleave(_a ,dim=0 )
A_ : Tuple = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=_a )
self.scheduler.set_timesteps(_a ,device=_a )
A_ : str = self.scheduler.timesteps
A_ : Optional[Any] = self.unet.config.in_channels
A_ : Any = downscale_height_and_width(_a ,_a ,self.movq_scale_factor )
# create initial latent
A_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,_a ,_a ,_a ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
A_ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : List[Any] = {"""image_embeds""": image_embeds}
A_ : int = self.unet(
sample=_a ,timestep=_a ,encoder_hidden_states=_a ,added_cond_kwargs=_a ,return_dict=_a ,)[0]
if do_classifier_free_guidance:
                A_ , A_ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
                A_ , A_ : List[Any] = noise_pred.chunk(2 )
                A_ , A_ : int = variance_pred.chunk(2 )
A_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A_ : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                A_ , A_ : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A_ : Any = self.scheduler.step(
_a ,_a ,_a ,generator=_a ,)[0]
# post-processing
A_ : Tuple = self.movq.decode(_a ,force_not_quantize=_a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
A_ : int = image * 0.5 + 0.5
A_ : int = image.clamp(0 ,1 )
A_ : Any = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
A_ : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
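# Hedged standalone sketch of the classifier-free guidance blend used in the
# denoising loop above (names are illustrative, not the pipeline's API).
import torch

def cfg_blend(noise_uncond, noise_text, guidance_scale):
    # guidance_scale = 1.0 reproduces the conditional prediction; larger values
    # push the sample further toward the text condition.
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)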
| 714 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
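# Shape check for the encoder above, sketched with random tensors: flattening the
# pooled (B, 2048, 2, 2) grid and transposing yields B x N x 2048 image tokens,
# here with num_image_embeds = 4 so the pool target from POOLING_BREAKDOWN is (2, 2).
import torch

pooled = torch.randn(8, 2048, 2, 2)  # output of AdaptiveAvgPool2d((2, 2))
tokens = torch.flatten(pooled, start_dim=2).transpose(1, 2).contiguous()
assert tokens.shape == (8, 4, 2048)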
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
        A_ : Dict = [json.loads(l ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
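# Hedged sketch of the padding performed in the collate function above: sequences
# are copied into a zero tensor of shape (batch, max_len) and a 0/1 mask marks the
# real tokens.
import torch

seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
max_len = max(len(s) for s in seqs)
text = torch.zeros(len(seqs), max_len, dtype=torch.long)
mask = torch.zeros(len(seqs), max_len, dtype=torch.long)
for i, s in enumerate(seqs):
    text[i, : len(s)] = s
    mask[i, : len(s)] = 1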
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
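# Hedged usage sketch for the transform pipeline above: a PIL image of any size
# comes out as a (3, 224, 224) float tensor (Normalize is omitted here since it
# does not change the shape).
from PIL import Image
import torchvision.transforms as transforms

tfm = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
assert tuple(tfm(Image.new("RGB", (640, 480))).shape) == (3, 224, 224)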
| 27 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """M-CLIP"""
def __init__( self : int ,_a : str=1024 ,_a : str=768 ,**_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = transformerDimSize
A_ : Dict = imageDimSize
super().__init__(**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = MCLIPConfig
def __init__( self : Optional[Any] ,_a : List[Any] ,*_a : str ,**_a : List[Any] ):
'''simple docstring'''
super().__init__(_a ,*_a ,**_a )
A_ : Optional[Any] = XLMRobertaModel(_a )
A_ : Tuple = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def _a ( self : int ,_a : int ,_a : Any ):
'''simple docstring'''
A_ : Any = self.transformer(input_ids=_a ,attention_mask=_a )[0]
A_ : Any = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_a ), embs
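# Masked mean pooling, sketched standalone with hypothetical shapes: padded
# positions are zeroed via the attention mask before averaging, exactly the
# reduction used in forward above.
import torch

hidden = torch.randn(2, 4, 8)                       # (batch, seq, dim)
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])   # 1 marks real tokens
pooled = (hidden * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)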
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int):
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
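# Worked example for the sieve above: with num = 10, multiples of 2 and 3 up to 10
# are crossed out and the primes 2, 3, 5, 7 remain.
assert prime_sieve(10) == [2, 3, 5, 7]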
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 0 |
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + """/grid.txt""") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 716 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question would take up a lot of space). So we remove
    # that left whitespace.
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
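# Hedged sketch of the stride/overflow behaviour relied on above: with
# return_overflowing_tokens=True a long context yields several features, and
# overflow_to_sample_mapping points each feature back to its source example.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
enc = tok(
    ["short question"],
    ["a very long context " * 200],
    truncation="only_second",
    max_length=128,
    stride=32,
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
)
print(len(enc["input_ids"]), enc["overflow_to_sample_mapping"][:3])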
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
__magic_name__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 0 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float):
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.2_5) = }""")
print(f"""{price_plus_tax(125.50, 0.0_5) = }""")
| 719 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # Only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
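# Hedged usage sketch for the feature extractor above, using the upstream class
# name (ClapFeatureExtractor) and a synthetic 1-second waveform at the extractor's
# default 48 kHz rate; real audio and a pretrained checkpoint are assumed in practice.
import numpy as np

from transformers import ClapFeatureExtractor

fe = ClapFeatureExtractor()
audio = np.random.randn(48000).astype(np.float32)
features = fe(audio, sampling_rate=48000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])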
| 27 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip_2_vision_model"""
def __init__( self : List[str] ,_a : List[str]=1408 ,_a : str=6144 ,_a : int=39 ,_a : Tuple=16 ,_a : str=224 ,_a : Optional[Any]=14 ,_a : Tuple="gelu" ,_a : Dict=0.00001 ,_a : str=0.0 ,_a : Optional[Any]=1e-10 ,_a : Tuple=True ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Dict = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Any = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Union[str, Any] = patch_size
A_ : Optional[int] = image_size
A_ : List[Any] = initializer_range
A_ : Dict = attention_dropout
A_ : List[str] = layer_norm_eps
A_ : Union[str, Any] = hidden_act
A_ : Dict = qkv_bias
@classmethod
def _a ( cls : int ,_a : Union[str, os.PathLike] ,**_a : Any ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
        A_ , A_ : str = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip_2_qformer"""
def __init__( self : List[str] ,_a : List[Any]=30522 ,_a : Any=768 ,_a : str=12 ,_a : str=12 ,_a : Any=3072 ,_a : Any="gelu" ,_a : Optional[int]=0.1 ,_a : List[str]=0.1 ,_a : Tuple=512 ,_a : List[str]=0.02 ,_a : Union[str, Any]=1e-12 ,_a : Optional[Any]=0 ,_a : Tuple="absolute" ,_a : Union[str, Any]=2 ,_a : Union[str, Any]=1408 ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : List[str] = vocab_size
A_ : Optional[int] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[str] = hidden_act
A_ : Optional[Any] = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Optional[Any] = position_embedding_type
A_ : Optional[int] = cross_attention_frequency
A_ : List[str] = encoder_hidden_size
@classmethod
def _a ( cls : Optional[Any] ,_a : Union[str, os.PathLike] ,**_a : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ : Any = cls.get_config_dict(_a ,**_a )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Tuple = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip-2"""
a_ = True
def __init__( self : str ,_a : Union[str, Any]=None ,_a : Any=None ,_a : str=None ,_a : List[str]=32 ,**_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(**_a )
if vision_config is None:
A_ : int = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
A_ : Tuple = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
A_ : str = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
A_ : Tuple = BlipaVisionConfig(**_a )
A_ : str = BlipaQFormerConfig(**_a )
A_ : List[Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
A_ : Dict = CONFIG_MAPPING[text_model_type](**_a )
A_ : Union[str, Any] = self.text_config.tie_word_embeddings
A_ : int = self.text_config.is_encoder_decoder
A_ : Optional[int] = num_query_tokens
A_ : List[Any] = self.vision_config.hidden_size
A_ : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A_ : List[Any] = 1.0
A_ : Optional[int] = 0.02
@classmethod
def _a ( cls : List[Any] ,_a : BlipaVisionConfig ,_a : BlipaQFormerConfig ,_a : PretrainedConfig ,**_a : Tuple ,):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_a ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = copy.deepcopy(self.__dict__ )
A_ : Optional[int] = self.vision_config.to_dict()
A_ : Optional[Any] = self.qformer_config.to_dict()
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
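
# A small composition sketch using the API defined above; the OPT text config is
# pulled from CONFIG_MAPPING exactly as Blip2Config.__init__ does internally:
#
#   vision_config = Blip2VisionConfig()
#   qformer_config = Blip2QFormerConfig()
#   text_config = CONFIG_MAPPING["opt"]()
#   config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
#   assert config.to_dict()["model_type"] == "blip-2"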
| 720 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    # special case for the DoubleHeads model, which needs multiple-choice shaped labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
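
# For reference, decoding the expected ids reproduces the sentence in the comment
# above (the tokenizer class is assumed here; it is not imported in this file):
#
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   print(tokenizer.decode(expected_output_ids))
#   # -> the president is a very good man. " \n " i'm sure he is, " said the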
| 27 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
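
# Note on the assertions above: BioGPT prepends a single special token id (2) to
# each sequence and repeats it before the second segment of a pair; unlike BERT,
# no trailing separator token is appended.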
| 721 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
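
# Round-trip sketch (outputs computed with CPython's base64 module):
#
#   >>> base85_encode("base 85")
#   b'@UX=h+?24'
#   >>> base85_decode(b'@UX=h+?24')
#   'base 85'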
| 27 | 0 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
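
# Usage sketch:
#
#   >>> main("7")
#   '0b111'
#   >>> main("-11")
#   '-0b1011'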
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
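
# Worked example (illustrative): with bert_tokens = ["你", "好", "世", "界"] and
# chinese_word_set = {"你好"}, add_sub_symbol returns ["你", "##好", "世", "界"]:
# the first piece of a matched LTP word keeps its form, the remaining pieces gain "##".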
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
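
# Example invocation (the script file name and the paths are illustrative only):
#
#   python run_chinese_ref.py --file_name ./data/zh_corpus.txt --ltp ./resources/ltp \
#       --bert bert-base-chinese --save_path ./data/ref.txt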
| 27 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # We need to override this test because ViT's forward signature differs from that of text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because ViT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
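
# Quick inference sketch with the same checkpoint used in the slow test above:
#
#   model = FlaxViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   logits = model(np.ones((1, 3, 224, 224))).logits
#   predicted_class = logits.argmax(-1)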
| 701 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
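
# Usage sketch (the checkpoint name is an assumption for illustration, and
# `image` is assumed to be a PIL image):
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus
#   #    pixel_values / pixel_mask from the image processor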
| 27 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Class name inferred from the note_seq backend this stub guards.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 702 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    # Class name inferred from the torch + torchsde backends this stub guards.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
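
# Why this pattern: these placeholder classes let the library's top-level import
# succeed when the optional backends are missing; constructing the object or
# calling from_config / from_pretrained then raises an informative error through
# requires_backends instead of an ImportError at import time.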
| 27 | 0 |