import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    """Create an RSA key pair and write it to disk."""
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    """Return an RSA (public_key, private_key) pair of the given bit size."""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    """Generate a key pair and write it to {name}_pubkey.txt / {name}_privkey.txt."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
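
# Each key file holds a single "key_size,n,exponent" line, so a key can be read
# back like this (a sketch; the filename assumes the default "rsa" prefix used
# by main() above):
with open("rsa_pubkey.txt") as key_file:
    key_size, n, e = (int(part) for part in key_file.read().split(","))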

# =============================================================================

import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse the command-line options for sharding the dataset."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the `text` column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` protobuf strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
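
# Reading a shard back for inspection (a sketch; the filename is a placeholder
# that follows the naming scheme used by main() above):
import tensorflow as tf

raw_dataset = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
for raw_record in raw_dataset.take(1):
    example = tf.train.Example()
    example.ParseFromString(raw_record.numpy())
    print(example.features.feature["input_ids"].int64_list.value[:10])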

# =============================================================================

speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed, unit_from, unit_to):
    """
    Convert speed from one unit to another using the charts above.

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    >>> convert_speed(100, "km/h", "mph")
    62.137
    """
    if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

# =============================================================================

import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one sample and record the characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")

# =============================================================================

from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake iteration step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment by four segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake outline described by the vector list."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)

# =============================================================================

import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each task is repeated n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per prompt and strip the trailing block of each one."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
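
# `remove_last_block` relies on `re.split` keeping its delimiters (the
# parenthesized group). A quick illustration with a made-up generation:
sample = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))"
print(remove_last_block(sample))
# -> def add(a, b):
#        return a + b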

# =============================================================================

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CvT model."""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
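
# A minimal usage sketch (assumes the `CvtModel` class that pairs with this
# config in transformers):
from transformers import CvtConfig, CvtModel

config = CvtConfig()
model = CvtModel(config)
print(config.depth)  # [1, 2, 10] with the defaults above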

# =============================================================================

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # RoCBert does not ship a fast tokenizer, so nothing extra is registered here.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Mirrors the runtime branch above: there is no fast tokenizer to import.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# =============================================================================

from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` using backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
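
# For the second call, the six permutations of ["A", "B", "C"] are printed in
# the order the backtracking explores them:
#
#   ['A', 'B', 'C']
#   ['A', 'C', 'B']
#   ['B', 'A', 'C']
#   ['B', 'C', 'A']
#   ['C', 'A', 'B']
#   ['C', 'B', 'A']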

# =============================================================================

import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

# =============================================================================

import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the total sequence length reaches `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation after `max_new_tokens` tokens past `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any one of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Ensure the criteria list carries a `max_length`, warning on mismatches."""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
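
# A minimal usage sketch (model name and prompt are placeholders, not from the
# original file): `generate` accepts a `StoppingCriteriaList`, so a wall-clock
# budget can be combined with a hard length cap.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, world", return_tensors="pt")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
outputs = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(outputs[0]))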

# =============================================================================

from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {'tokenization_tapex': ['TapexTokenizer']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)

# =============================================================================

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Donut Swin model."""

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
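
# A quick sanity check of the derived hidden size (a sketch; `DonutSwinModel`
# is the matching model class in transformers):
from transformers import DonutSwinConfig, DonutSwinModel

config = DonutSwinConfig()
model = DonutSwinModel(config)
print(config.hidden_size)  # 96 * 2 ** 3 == 768 with the default embed_dim/depths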

# =============================================================================

from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# =============================================================================

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}

# =============================================================================

import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Projects sinusoidal timestep features through a two-layer MLP."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
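
# A quick shape check for the helper above (inputs are illustrative; reuses the
# jnp import from this file):
timesteps = jnp.arange(4)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
print(emb.shape)  # (4, 32): sin features in the first half, cos in the second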

# =============================================================================

from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Also supports {"image": ..., "question": ...} dicts, lists of such
            # dicts, generators, and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]

# =============================================================================

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

# =============================================================================

import argparse
import os
import re
__lowerCAmelCase : Any = '''src/transformers'''
# Pattern that looks at the indentation in a line.
__lowerCAmelCase : Optional[Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCAmelCase : str = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCAmelCase : Dict = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCAmelCase : Optional[Any] = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCAmelCase : int = re.compile(r'\[([^\]]+)\]')
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = _re_indent.search(__UpperCamelCase )
return "" if search is None else search.groups()[0]
def __magic_name__ ( A : Dict, A : Dict="", A : Tuple=None, A : Any=None ):
'''simple docstring'''
a = 0
a = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__UpperCamelCase ):
index += 1
a = ["""\n""".join(lines[:index] )]
else:
a = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
a = [lines[index]]
index += 1
while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__UpperCamelCase ) )
if index < len(__UpperCamelCase ) - 1:
a = [lines[index + 1]]
index += 1
else:
a = []
else:
blocks.append("\n".join(__UpperCamelCase ) )
a = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCamelCase ) > 0:
blocks.append("\n".join(__UpperCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCamelCase ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
def _inner(A : str ):
return key(__UpperCamelCase ).lower().replace("_", "" )
return _inner
def __magic_name__ ( A : Dict, A : int=None ):
'''simple docstring'''
def noop(A : int ):
return x
if key is None:
a = noop
# Constants are all uppercase, they go first.
a = [obj for obj in objects if key(__UpperCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
a = [obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
a = [obj for obj in objects if not key(__UpperCamelCase )[0].isupper()]
a = ignore_underscore(__UpperCamelCase )
return sorted(__UpperCamelCase, key=__UpperCamelCase ) + sorted(__UpperCamelCase, key=__UpperCamelCase ) + sorted(__UpperCamelCase, key=__UpperCamelCase )
def __magic_name__ ( A : Any ):
'''simple docstring'''
def _replace(A : List[str] ):
a = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
a = [part.strip().replace("\"", "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(__UpperCamelCase )] ) + "]"
a = import_statement.split("\n" )
if len(__UpperCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
a = 2 if lines[1].strip() == """[""" else 1
a = [(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
a = sort_objects(__UpperCamelCase, key=lambda A : x[1] )
a = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
a = _re_bracket_content.sub(_replace, lines[1] )
else:
a = [part.strip().replace("\"", "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a = keys[:-1]
a = get_indent(lines[1] ) + """, """.join([F"""\"{k}\"""" for k in sort_objects(__UpperCamelCase )] )
return "\n".join(__UpperCamelCase )
else:
# Finally we have to deal with imports fitting on one line
a = _re_bracket_content.sub(_replace, __UpperCamelCase )
return import_statement
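# Sorts the `_import_structure` entries of an `__init__.py`-style file in
# place; with `check_only=True` it only reports whether a rewrite would be
# needed instead of overwriting the file.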
def __magic_name__ ( A : Dict, A : Optional[Any]=True ):
'''simple docstring'''
with open(__UpperCamelCase, encoding="utf-8" ) as f:
a = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
a = split_code_in_indented_blocks(
__UpperCamelCase, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(__UpperCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
a = main_blocks[block_idx]
a = block.split("\n" )
# Get to the start of the imports.
a = 0
while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
a = len(__UpperCamelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
a = """\n""".join(block_lines[line_idx:-1] )
a = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
a = split_code_in_indented_blocks(__UpperCamelCase, indent_level=__UpperCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
a = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
a = [(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
a = [(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None]
a = [x[0] for x in sorted(__UpperCamelCase, key=lambda A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
a = 0
a = []
for i in range(len(__UpperCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
a = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__UpperCamelCase )
count += 1
# And we put our main block back together with its first and last line.
a = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCamelCase ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(__UpperCamelCase, "w", encoding="utf-8" ) as f:
f.write("\n".join(__UpperCamelCase ) )
def __magic_name__ ( A : int=True ):
'''simple docstring'''
a = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
a = sort_imports(os.path.join(__UpperCamelCase, "__init__.py" ), check_only=__UpperCamelCase )
if result:
a = [os.path.join(__UpperCamelCase, "__init__.py" )]
if len(__UpperCamelCase ) > 0:
raise ValueError(F"""Would overwrite {len(__UpperCamelCase )} files, run `make style`.""" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Tuple = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__lowerCAmelCase : List[str] = logging.getLogger(__name__)
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict ) -> Tuple:
a = False
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> Tuple:
if not self.initialized:
a = RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
a = True
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
self.retriever.index.init_index()
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> List[Any]:
a , a = self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
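# Distributed variant of the RAG retriever: retrieval is delegated to a pool
# of Ray actor workers (each wrapping its own retriever via the helper class
# above), so the index can be loaded in separate processes during distributed
# fine-tuning.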
class snake_case__ (_lowercase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str=None ) -> List[str]:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you\'ll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
a = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def __UpperCAmelCase ( self : str ) -> int:
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> Optional[Any]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
a , a = ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
a , a = self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Any ) -> str:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=None , **__lowerCamelCase : List[str] ) -> Union[str, Any]:
a = kwargs.pop("config" , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
a = RagTokenizer.from_pretrained(A_ , config=A_ )
a = rag_tokenizer.question_encoder
a = rag_tokenizer.generator
if indexed_dataset is not None:
a = "custom"
a = CustomHFIndex(config.retrieval_vector_size , A_ )
else:
a = cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class snake_case__ :
"""simple docstring"""
def __init__( self : List[str] ) -> Union[str, Any]:
a = []
a = 0
a = 0
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
return self.head == self.tail
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str ) -> List[Any]:
self.data.append(__lowerCamelCase )
a = self.tail + 1
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = self.data[self.head]
a = self.head + 1
return ret
def __UpperCAmelCase ( self : str ) -> Dict:
return self.tail - self.head
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Dict ) -> Optional[Any]:
a = data
a = None
a = None
a = 1
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
return self.data
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
return self.left
def __UpperCAmelCase ( self : Any ) -> Dict:
return self.right
def __UpperCAmelCase ( self : str ) -> str:
return self.height
def __UpperCAmelCase ( self : int , __lowerCamelCase : Optional[Any] ) -> Tuple:
a = data
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] ) -> Any:
a = node
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple ) -> Dict:
a = node
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict ) -> Any:
a = height
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def __magic_name__ ( A : int, A : Any ):
'''simple docstring'''
if a > b:
return a
return b
def __magic_name__ ( A : Tuple ):
'''simple docstring'''
print("left rotation node:", node.get_data() )
a = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(__SCREAMING_SNAKE_CASE )
return ret
def __magic_name__ ( A : Any ):
'''simple docstring'''
print("right rotation node:", node.get_data() )
a = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(__SCREAMING_SNAKE_CASE )
return ret
def __magic_name__ ( A : Optional[Any] ):
'''simple docstring'''
a = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__SCREAMING_SNAKE_CASE ) )
return right_rotation(__SCREAMING_SNAKE_CASE )
def __magic_name__ ( A : str ):
'''simple docstring'''
a = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__SCREAMING_SNAKE_CASE ) )
return left_rotation(__SCREAMING_SNAKE_CASE )
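# Standard AVL insertion: recurse down to the insertion point, then on the way
# back up apply one of the four rebalancing cases whenever the height
# difference between children reaches 2 (LL -> right rotation,
# LR -> left-right rotation, RR -> left rotation, RL -> right-left rotation).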
def __magic_name__ ( A : Union[str, Any], A : Union[str, Any] ):
'''simple docstring'''
if node is None:
return MyNode(__SCREAMING_SNAKE_CASE )
if data < node.get_data():
node.set_left(insert_node(node.get_left(), __SCREAMING_SNAKE_CASE ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
a = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
a = right_rotation(__SCREAMING_SNAKE_CASE )
else:
a = lr_rotation(__SCREAMING_SNAKE_CASE )
else:
node.set_right(insert_node(node.get_right(), __SCREAMING_SNAKE_CASE ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
a = node.get_right()
assert right_child is not None
if data < right_child.get_data():
a = rl_rotation(__SCREAMING_SNAKE_CASE )
else:
a = left_rotation(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__SCREAMING_SNAKE_CASE )
return node
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
while True:
a = root.get_right()
if right_child is None:
break
a = right_child
return root.get_data()
def __magic_name__ ( A : int ):
'''simple docstring'''
while True:
a = root.get_left()
if left_child is None:
break
a = left_child
return root.get_data()
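# AVL deletion: a node with two children is replaced by the left-most value of
# its right subtree, then heights and rotations are restored on the way back
# up, mirroring the insertion rebalancing above.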
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
a = root.get_left()
a = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
a = get_left_most(__SCREAMING_SNAKE_CASE )
root.set_data(__SCREAMING_SNAKE_CASE )
root.set_right(del_node(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) )
elif left_child is not None:
a = left_child
elif right_child is not None:
a = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) )
if get_height(__SCREAMING_SNAKE_CASE ) - get_height(__SCREAMING_SNAKE_CASE ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
a = left_rotation(__SCREAMING_SNAKE_CASE )
else:
a = rl_rotation(__SCREAMING_SNAKE_CASE )
elif get_height(__SCREAMING_SNAKE_CASE ) - get_height(__SCREAMING_SNAKE_CASE ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
a = right_rotation(__SCREAMING_SNAKE_CASE )
else:
a = lr_rotation(__SCREAMING_SNAKE_CASE )
a = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
root.set_height(__SCREAMING_SNAKE_CASE )
return root
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple ) -> Any:
a = None
def __UpperCAmelCase ( self : Any ) -> List[Any]:
return get_height(self.root )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> List[Any]:
print("insert:" + str(__lowerCamelCase ) )
a = insert_node(self.root , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
print("delete:" + str(__lowerCamelCase ) )
if self.root is None:
print("Tree is empty!" )
return
a = del_node(self.root , __lowerCamelCase )
def __str__( self : Dict , ) -> List[str]: # a level-order traversal; gives a more intuitive look at the tree
a = ""
a = MyQueue()
q.push(self.root )
a = self.get_height()
if layer == 0:
return output
a = 0
while not q.is_empty():
a = q.pop()
a = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__lowerCamelCase )
q.push(__lowerCamelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
a = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , __lowerCamelCase ) - 1:
a = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__lowerCAmelCase : Union[str, Any] = AVLtree()
__lowerCAmelCase : List[Any] = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 701 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
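# Computes the last `digits` digits of the hyperexponentiation (tetration)
# base^^height by repeated modular exponentiation; the defaults (1777, 1855,
# 8 digits) appear to correspond to Project Euler problem 188.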
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __magic_name__ ( A : Dict ):
'''simple docstring'''
a = filter(lambda A : p.requires_grad, model.parameters() )
a = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if metric == "rouge2":
a = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
a = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
a = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
a = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
a = ModelCheckpoint(
dirpath=A, filename=A, monitor=F"""val_{metric}""", mode="max", save_top_k=1, every_n_epochs=1, )
return checkpoint_callback
def __magic_name__ ( A : Union[str, Any], A : Optional[int] ):
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=A, verbose=A, )
class snake_case__ (pl.Callback ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
a = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__lowerCamelCase )
@rank_zero_only
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
a = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
a = Path(pl_module.hparams.output_dir )
if type_path == "test":
a = od / "test_results.txt"
a = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
a = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
a = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__lowerCamelCase )
generations_file.parent.mkdir(exist_ok=__lowerCamelCase )
with open(__lowerCamelCase , "a+" ) as writer:
for key in sorted(__lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
a = metrics[key]
if isinstance(__lowerCamelCase , torch.Tensor ):
a = val.item()
a = f"""{key}: {val:.6f}\n"""
writer.write(__lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
a = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__lowerCamelCase )
@rank_zero_only
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> List[Any]:
try:
a = pl_module.model.model.num_parameters()
except AttributeError:
a = pl_module.model.num_parameters()
a = count_trainable_parameters(__lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__lowerCamelCase , __lowerCamelCase , "test" )
@rank_zero_only
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 702 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662 | 0 |
import math
def __magic_name__ ( A : int = 100 ):
'''simple docstring'''
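# Project Euler problem 6: the difference between the square of the sum and
# the sum of the squares of the first n natural numbers.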
a = sum(i * i for i in range(1, n + 1 ) )
a = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
| 662 | 0 |
import numpy
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : numpy.ndarray , __lowerCamelCase : numpy.ndarray ) -> None:
a = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
a = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
a = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
a = numpy.random.rand(3 , 1 )
# Real output values provided.
a = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
a = numpy.zeros(output_array.shape )
def __UpperCAmelCase ( self : Union[str, Any] ) -> numpy.ndarray:
a = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
a = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
a = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
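# Plain gradient descent on the squared error: each weight update below is the
# chain-rule product of the upstream error term and the sigmoid derivative at
# that layer (no explicit learning-rate factor is applied).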
def __UpperCAmelCase ( self : int ) -> None:
a = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
a = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
a = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __UpperCAmelCase ( self : int , __lowerCamelCase : numpy.ndarray , __lowerCamelCase : int , __lowerCamelCase : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
a = self.feedforward()
self.back_propagation()
if give_loss:
a = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : numpy.ndarray ) -> int:
a = input_arr
a = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
a = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
a = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __magic_name__ ( A : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def __magic_name__ ( A : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def __magic_name__ ( ):
'''simple docstring'''
a = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
), dtype=numpy.floataa, )
# True output values for the given input values.
a = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.floataa )
# Calling neural network class.
a = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase, output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase, iterations=10, give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 704 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
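# Mirrors the processor's aspect-ratio-preserving resize: the shorter side is
# scaled to size["shortest_edge"]; for batched inputs the per-image sizes are
# reduced to the max height/width, i.e. the padded batch shape.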
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
'''simple docstring'''
def __magic_name__ ( A : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(A, (list, tuple) ) or not all(
isinstance(A, A ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
a = numbers[0]
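# Track both the maximum and the minimum product ending at each position: a
# negative number turns the most negative running product into the largest
# one, which is why the two trackers are swapped below.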
for i in range(1, len(A ) ):
# update the maximum and minimum subarray products
a = numbers[i]
if number < 0:
a , a = min_till_now, max_till_now
a = max(A, max_till_now * number )
a = min(A, min_till_now * number )
# update the maximum product found till now
a = max(A, A )
return max_prod
| 705 |
def __magic_name__ ( A : list ):
'''simple docstring'''
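# Bidirectional bubble sort: the first inner pass bubbles small elements
# toward the front, the second bubbles large elements toward the back; the
# outer loop stops early once a full cycle performs no swaps.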
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 0 |
from __future__ import annotations
def __magic_name__ ( A : list[int] ): # This function is recursive
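# Brute-force recursive search for the longest non-decreasing subsequence;
# worst-case exponential time, so only suitable for short inputs.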
a = len(snake_case__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a = array[0]
a = False
a = 1
a = []
while not is_found and i < array_length:
if array[i] < pivot:
a = True
a = [element for element in array[i:] if element >= array[i]]
a = longest_subsequence(snake_case__ )
if len(snake_case__ ) > len(snake_case__ ):
a = temp_array
else:
i += 1
a = [element for element in array[1:] if element >= pivot]
a = [pivot, *longest_subsequence(snake_case__ )]
if len(snake_case__ ) > len(snake_case__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 0 |
'''simple docstring'''
import torch
from torch import nn
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Optional[Any]=False ) -> Union[str, Any]:
super().__init__()
a = n_token
a = d_embed
a = d_proj
a = cutoffs + [n_token]
a = [0] + self.cutoffs
a = div_val
a = self.cutoffs[0]
a = len(self.cutoffs ) - 1
a = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
a = nn.Parameter(torch.zeros(self.n_clusters ) )
a = nn.ModuleList()
a = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
a = keep_order
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
if proj is None:
a = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
a = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
a = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=False ) -> Any:
if labels is not None:
# Shift so that tokens < n predict n
a = hidden[..., :-1, :].contiguous()
a = labels[..., 1:].contiguous()
a = hidden.view(-1 , hidden.size(-1 ) )
a = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
a = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
a = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
a = labels != -1_00
a = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
a = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
a = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
a = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
a = 0
a = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
a = (labels >= l_idx) & (labels < r_idx)
a = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
a = labels.index_select(0 , __lowerCAmelCase ) - l_idx
a = head_logprob.index_select(0 , __lowerCAmelCase )
a = hidden.index_select(0 , __lowerCAmelCase )
else:
a = hidden
if i == 0:
if labels is not None:
a = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
a = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
a = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
if self.n_clusters == 0:
a = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
a , a = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.out_layers[0].weight[l_idx:r_idx]
a = self.out_layers[0].bias[l_idx:r_idx]
else:
a = self.out_layers[i].weight
a = self.out_layers[i].bias
if i == 0:
a = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
a , a , a = weights[0], biases[0], self.out_projs[0]
a = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a = hidden.new_empty((head_logit.size(0 ), self.n_token) )
a = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
a = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
a , a = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
a = head_logprob[:, : self.cutoffs[0]]
else:
a , a , a = weights[i], biases[i], self.out_projs[i]
a = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
a = head_logprob[:, -i] + tail_logprob_i
a = logprob_i
return out
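# --- Illustrative usage sketch (added; not part of the original file) ---
# The class above is an adaptive-softmax projection head in the style of
# Transformer-XL's ProjectedAdaptiveLogSoftmax. Because the method names in
# this file are obfuscated, the sketch assumes the conventional interface
# (`forward(hidden, labels)` for the training NLL, `log_prob(hidden)` for the
# full distribution); shapes and hyperparameters are assumptions for the example.
#
# >>> head = snake_case__(n_token=10_000, d_embed=512, d_proj=512, cutoffs=[1_000, 5_000], div_val=1)
# >>> hidden = torch.randn(8, 512)          # batch of 8 hidden states
# >>> labels = torch.randint(0, 10_000, (8,))
# >>> nll = head(hidden, labels)            # per-token negative log-likelihood
# >>> logprobs = head.log_prob(hidden)      # (8, 10_000) log-distribution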
| 707 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : Dict = logging.getLogger(__name__)
__lowerCAmelCase : int = tf.data.AUTOTUNE
def parse_args ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config", type=UpperCAmelCase__, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
parser.add_argument(
"--tokenizer", type=UpperCAmelCase__, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
parser.add_argument(
"--per_replica_batch_size", type=UpperCAmelCase__, default=8, help="Batch size per TPU core.", )
parser.add_argument(
"--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
parser.add_argument(
"--tpu_name", type=UpperCAmelCase__, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
parser.add_argument(
"--tpu_zone", type=UpperCAmelCase__, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
parser.add_argument(
"--gcp_project", type=UpperCAmelCase__, help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
parser.add_argument(
"--train_dataset", type=UpperCAmelCase__, help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--shuffle_buffer_size", type=UpperCAmelCase__, default=2**18, help="Size of the shuffle buffer (in samples)", )
parser.add_argument(
"--eval_dataset", type=UpperCAmelCase__, help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--num_epochs", type=UpperCAmelCase__, default=1, help="Number of epochs to train for.", )
parser.add_argument(
"--learning_rate", type=UpperCAmelCase__, default=1E-4, help="Learning rate to use for training.", )
parser.add_argument(
"--weight_decay_rate", type=UpperCAmelCase__, default=1E-3, help="Weight decay rate to use for training.", )
parser.add_argument(
"--max_length", type=UpperCAmelCase__, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
parser.add_argument(
"--mlm_probability", type=UpperCAmelCase__, default=0.15, help="Fraction of tokens to mask during training.", )
parser.add_argument("--output_dir", type=UpperCAmelCase__, required=UpperCAmelCase__, help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id", type=UpperCAmelCase__, help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu ( args : Tuple ):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples ( file_list : int ):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(R"-\d+-(\d+)\.tfrecord", filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
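# Illustrative worked example (added): the regex above expects shard filenames
# of the form "dataset-<shard>-<num_samples>.tfrecord" and recovers the sample
# count from the trailing number:
# >>> re.search(R"-\d+-(\d+)\.tfrecord", "dataset-3-5000.tfrecord").group(1)
# '5000'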
def prepare_dataset ( records : Tuple, decode_fn : List[str], mask_fn : str, batch_size : Optional[int], shuffle : List[str], shuffle_buffer_size : Optional[int]=None ):
    '''simple docstring'''
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size, drop_remainder=True )
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE )
    dataset = dataset.prefetch(tf.data.AUTOTUNE )
    return dataset
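# Illustrative usage sketch (added): a minimal call to the helper above. The
# file list and sizes are assumptions for the example; `decode_fn` and
# `mask_with_collator` are defined inside main() below.
#
# >>> ds = prepare_dataset(
# ...     ["gs://my-bucket/train/dataset-0-5000.tfrecord"],
# ...     decode_fn=decode_fn, mask_fn=mask_with_collator,
# ...     batch_size=64, shuffle=True, shuffle_buffer_size=2**18)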
def main ( args : List[str] ):
'''simple docstring'''
if not args.no_tpu:
a = initialize_tpu(UpperCAmelCase__ )
a = tf.distribute.TPUStrategy(UpperCAmelCase__ )
else:
a = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
a = AutoTokenizer.from_pretrained(args.tokenizer )
a = AutoConfig.from_pretrained(args.pretrained_model_config )
a = tokenizer.vocab_size
a = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord" ) )
if not training_records:
raise ValueError(F"""No .tfrecord files found in {args.train_dataset}.""" )
a = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord" ) )
if not eval_records:
raise ValueError(F"""No .tfrecord files found in {args.eval_dataset}.""" )
a = count_samples(UpperCAmelCase__ )
a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
a = steps_per_epoch * args.num_epochs
with strategy.scope():
a = TFAutoModelForMaskedLM.from_config(UpperCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
a = create_optimizer(
num_train_steps=UpperCAmelCase__, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=UpperCAmelCase__, metrics=["accuracy"] )
    def decode_fn(example : Union[str, Any] ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example, features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
a = DataCollatorForLanguageModeling(
tokenizer=UpperCAmelCase__, mlm_probability=args.mlm_probability, mlm=UpperCAmelCase__, return_tensors="tf" )
def mask_with_collator(A : List[str] ):
# TF really needs an isin() function
a = (
~tf.cast(batch["attention_mask"], tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
a = data_collator.tf_mask_tokens(
batch["input_ids"], vocab_size=len(UpperCAmelCase__ ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=UpperCAmelCase__, )
return batch
a = args.per_replica_batch_size * strategy.num_replicas_in_sync
a = prepare_dataset(
UpperCAmelCase__, decode_fn=UpperCAmelCase__, mask_fn=UpperCAmelCase__, batch_size=UpperCAmelCase__, shuffle=UpperCAmelCase__, shuffle_buffer_size=args.shuffle_buffer_size, )
a = prepare_dataset(
UpperCAmelCase__, decode_fn=UpperCAmelCase__, mask_fn=UpperCAmelCase__, batch_size=UpperCAmelCase__, shuffle=UpperCAmelCase__, )
a = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=UpperCAmelCase__ ) )
model.fit(
UpperCAmelCase__, validation_data=UpperCAmelCase__, epochs=args.num_epochs, callbacks=UpperCAmelCase__, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (PreTrainedTokenizer ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
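# --- Illustrative usage sketch (added; not part of the original file) ---
# Loading and round-tripping text with this SentencePiece-backed tokenizer,
# assuming the standard PreTrainedTokenizer interface inherited above. The
# checkpoint name comes from the pretrained map at the top of the file.
#
# >>> tok = snake_case__.from_pretrained("google/reformer-crime-and-punishment")
# >>> ids = tok("Crime and Punishment")["input_ids"]
# >>> tok.decode(ids)
# 'Crime and Punishment'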
| 662 | 0 |
import qiskit
def single_qubit_measure ( qubits : int, classical_bits : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
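# Illustrative note (added): since no gates are applied before measurement, the
# qubit stays in |0>, so the expected histogram is all zeros for every shot,
# e.g. {'0': 1000} with shots=1000.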
| 709 |
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
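# --- Illustrative usage sketch (added; not part of the original file) ---
# Running the Banker's-algorithm safety check on the tables defined at the top
# of this file. The method names in the class are obfuscated, so the sketch
# assumes the un-obfuscated entry point `main(**kwargs)` shown above, whose
# pretty-printing branch fires on any truthy keyword such as describe=True.
#
# >>> snake_case__(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)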
| 662 | 0 |
from collections.abc import Iterable
from typing import Any
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : int | None = None ) -> Optional[Any]:
a = value
a = None # Added in order to delete a node easier
a = None
a = None
def __repr__( self : str ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class snake_case__ :
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Node | None = None ) -> str:
a = root
def __str__( self : str ) -> str:
return str(self.root )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Node , __lowerCamelCase : Node | None ) -> None:
if new_children is not None: # reset its kids
a = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__lowerCAmelCase ): # If it is the right children
a = new_children
else:
a = new_children
else:
a = new_children
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCAmelCase ( self : str ) -> bool:
return self.root is None
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> None:
a = Node(__lowerCAmelCase ) # create a new Node
if self.empty(): # if Tree is empty
a = new_node # set its root
else: # Tree is not empty
a = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
a = new_node # We insert the new node in a leaf
break
else:
a = parent_node.left
else:
if parent_node.right is None:
a = new_node
break
else:
a = parent_node.right
a = parent_node
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Any ) -> None:
for value in values:
self.__insert(__lowerCAmelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Node | None:
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
a = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
a = node.left if value < node.value else node.right
return node
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
a = self.root
if not self.empty():
while node.right is not None:
a = node.right
return node
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Node | None = None ) -> Node | None:
if node is None:
a = self.root
if self.root is None:
return None
if not self.empty():
a = self.root
while node.left is not None:
a = node.left
return node
def __UpperCAmelCase ( self : Any , __lowerCamelCase : int ) -> None:
a = self.search(__lowerCAmelCase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__lowerCAmelCase , __lowerCAmelCase )
elif node.left is None: # Has only right children
self.__reassign_nodes(__lowerCAmelCase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__lowerCAmelCase , node.left )
else:
a = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
a = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCAmelCase ( self : str , __lowerCamelCase : list , __lowerCamelCase : Node | None ) -> None:
if node:
self.inorder(__lowerCAmelCase , node.left )
arr.append(node.value )
self.inorder(__lowerCAmelCase , node.right )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Node ) -> int:
a = []
self.inorder(__lowerCAmelCase , __lowerCAmelCase ) # append all values to list using inorder traversal
return arr[k - 1]
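    # Illustrative note (added): the method above relies on the inorder
    # traversal, which yields values in ascending order, so k=1 returns the
    # tree minimum. For a tree built from (8, 3, 6, 1, 10), k=2 would return 3.
    # (Method names in this file are obfuscated; this describes the intent.)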
def __magic_name__ ( A : Node | None ):
'''simple docstring'''
a = []
if curr_node is not None:
a = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def __magic_name__ ( ):
'''simple docstring'''
a = (8, 3, 6, 1, 10, 14, 13, 4, 7)
a = BinarySearchTree()
for i in testlist:
t.insert(UpperCAmelCase__ )
# Prints all the elements of the list in order traversal
print(UpperCAmelCase__ )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: ", t.get_max().value ) # type: ignore
print("Min Value: ", t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCAmelCase__ )
print(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
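# --- Illustrative usage sketch (added; not part of the original file) ---
# The two helpers above back the public `interleave_datasets` and
# `concatenate_datasets` entry points; the dataset contents below are
# assumptions for the example.
#
# >>> from datasets import Dataset
# >>> d1 = Dataset.from_dict({"x": [0, 1, 2]})
# >>> d2 = Dataset.from_dict({"x": [10, 11, 12]})
# >>> mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# >>> merged = concatenate_datasets([d1, d2])   # len(merged) == 6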
| 662 | 0 |
from copy import deepcopy
class snake_case__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict = None , __lowerCamelCase : List[str] = None ) -> Dict:
if arr is None and size is not None:
a = size
a = [0] * size
elif arr is not None:
self.init(UpperCAmelCase_ )
else:
raise ValueError("Either arr or size must be specified" )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any ) -> Any:
a = len(UpperCAmelCase_ )
a = deepcopy(UpperCAmelCase_ )
for i in range(1 , self.size ):
a = self.next_(UpperCAmelCase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
a = self.next_(UpperCAmelCase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> int:
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : List[str] ) -> Any:
return index - (index & (-index))
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Tuple:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
a = self.next_(UpperCAmelCase_ )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> List[str]:
self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] ) -> int:
if right == 0:
return 0
a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
a = self.prev(UpperCAmelCase_ )
return result
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> List[str]:
return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str ) -> Union[str, Any]:
return self.query(UpperCAmelCase_ , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[Any] ) -> List[str]:
value -= self.tree[0]
if value < 0:
return -1
a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
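# --- Illustrative usage sketch (added; not part of the original file) ---
# Point updates and prefix/range queries on the Fenwick (binary indexed) tree
# above, assuming the un-obfuscated method names `add`, `prefix`, and `query`
# (prefix(right) sums indices [0, right); query(left, right) sums [left, right)).
#
# >>> f = snake_case__([1, 2, 3, 4, 5])
# >>> f.prefix(3)       # 1 + 2 + 3 -> 6
# >>> f.add(1, 10)      # array becomes [1, 12, 3, 4, 5]
# >>> f.query(1, 4)     # 12 + 3 + 4 -> 19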
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (PreTrainedTokenizerFast ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
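    # Illustrative note (added): for a single sequence the method above returns
    # [CLS] A [SEP]; for a pair it returns [CLS] A [SEP] B [SEP], which is the
    # layout the special-tokens-mask and token-type-id methods below assume.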
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 662 | 0 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval ( s : List[Any] ):
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit )
    return product
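# Illustrative worked example (added): str_eval("123") computes 1 * 2 * 3 == 6,
# and any "0" digit in the window forces the product to 0.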
def solution ( n : List[Any] = N ):
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def parse_args ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function ( tokenizer : List[str] ):
    '''simple docstring'''
    def fn(examples : Tuple ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples ( tokenized_data : Any ):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main ( args : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
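    # Illustrative worked example (added): with max_length = 512, a batch whose
    # concatenated token stream has total_length = 1300 keeps
    # (1300 // 512) * 512 = 1024 tokens and yields two 512-token samples; the
    # trailing 276 tokens are dropped.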
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 662 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job ( job : Optional[int] ):
    '''simple docstring'''
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
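# Illustrative worked example (added): a job payload such as
# {"name": "run_tests", "started_at": "2023-01-01T10:00:00Z",
#  "completed_at": "2023-01-01T10:12:00Z"} is reduced to
# {"started_at": ..., "completed_at": ..., "duration": 12}  # minutes, rounded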
def get_job_time ( workflow_run_id : List[Any], token : int=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""", headers=headers ).json()
            job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example : List[str] ):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : int = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset (IterableDataset ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria (StoppingCriteria ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def remove_last_block ( string : List[Any] ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(__lowerCAmelCase ), string )
    # last string should be ""
    return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def main():
'''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
    # note: args.batch_size is really num_return_sequences; the dataloader batch size is fixed to 1
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
from math import pi
def arc_length(angle, radius):
    """Return the length of the arc spanned by `angle` degrees on a circle of the given radius."""
return 2 * pi * radius * (angle / 360)
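# Worked check: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708,
# i.e. a quarter of the circle's full circumference 2 * pi * r.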
if __name__ == "__main__":
print(arc_length(90, 10))
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Recursively quick-sort a[start:end + 1] in place and return the comparison count."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
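# A small usage sketch (the comparison count varies from run to run because the
# pivot is chosen at random):
#   >>> data = [3, 1, 2]
#   >>> _ = _in_place_quick_sort(data, 0, len(data) - 1)  # returns total comparisons
#   >>> data
#   [1, 2, 3]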
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of comparisons for 100 elements selected from a standard normal distribution is:'
)
print(z)
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        # Test that the offsets correctly reflect the arguments `add_prefix_space`
        # and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""simple docstring"""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the Arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
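# Usage sketch (not part of the module): `datasets` dispatches parquet files to
# the builder above, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"})
# where "train.parquet" stands in for any local parquet file.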
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
from __future__ import annotations
def longest_subsequence(array: list) -> list:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
"""simple docstring"""
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ) -> List[Any]:
super().__init__(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
a = image_processor
a = feature_extractor
def __call__( self : Dict , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : Dict=False , __lowerCamelCase : str=False , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict , ) -> str:
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
a = None
if images is not None:
a = self.image_processor(lowerCAmelCase_ , mask_pixel=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
if images_mixed is not None:
a = self.image_processor(lowerCAmelCase_ , is_mixed=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
if audio is not None:
a = self.feature_extractor(
lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , mask_audio=lowerCAmelCase_ , **lowerCAmelCase_ )
a = {}
if audio is not None:
output_dict.update(lowerCAmelCase_ )
if images is not None:
output_dict.update(lowerCAmelCase_ )
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase_ )
return output_dict
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
a = self.image_processor.model_input_names
a = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
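# Usage sketch (the checkpoint id is illustrative): the processor merges both
# modality outputs into a single dict, e.g.
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)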
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
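# Shape sketch: for 1-d timesteps of shape (batch,), the returned signal has
# shape (batch, embedding_dim), e.g.
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
#   # emb.shape == (4, 32)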
class FlaxTimestepEmbedding(nn.Module):
"""simple docstring"""
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class FlaxTimesteps(nn.Module):
"""simple docstring"""
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCAmelCase : Union[str, Any] = get_logger(__name__)
class ExtractManager:
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Optional[str] = None ) -> Tuple:
a = (
os.path.join(__a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
a = Extractor
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : str ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
a = os.path.abspath(__a )
return os.path.join(self.extract_dir , hash_url_to_filename(__a ) )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : bool ) -> bool:
return force_extract or (
not os.path.isfile(__a ) and not (os.path.isdir(__a ) and os.listdir(__a ))
)
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> str:
a = self.extractor.infer_extractor_format(__a )
if not extractor_format:
return input_path
a = self._get_output_path(__a )
if self._do_extract(__a , __a ):
self.extractor.extract(__a , __a , __a )
return output_path
class BaseExtractor(ABC):
"""simple docstring"""
@classmethod
@abstractmethod
def __UpperCAmelCase ( cls : Union[str, Any] , __lowerCamelCase : Union[Path, str] , **__lowerCamelCase : Union[str, Any] ) -> bool:
...
@staticmethod
@abstractmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
"""simple docstring"""
    magic_numbers: List[bytes] = []
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : int ) -> List[Any]:
with open(__a , "rb" ) as f:
return f.read(__a )
@classmethod
def __UpperCAmelCase ( cls : Dict , __lowerCamelCase : Union[Path, str] , __lowerCamelCase : bytes = b"" ) -> bool:
if not magic_number:
a = max(len(__a ) for cls_magic_number in cls.magic_numbers )
try:
a = cls.read_magic_number(__a , __a )
except OSError:
return False
return any(magic_number.startswith(__a ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
"""simple docstring"""
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : Union[Path, str] , **__lowerCamelCase : Dict ) -> bool:
return tarfile.is_tarfile(__a )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any ) -> int:
def resolved(__lowerCamelCase : str ) -> str:
return os.path.realpath(os.path.abspath(__a ) )
def badpath(__lowerCamelCase : str , __lowerCamelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__a , __a ) ).startswith(__a )
def badlink(__lowerCamelCase : int , __lowerCamelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
a = resolved(os.path.join(__a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__a )
a = resolved(__a )
for finfo in members:
if badpath(finfo.name , __a ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(__a , __a ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(__a , __a ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
os.makedirs(__a , exist_ok=__a )
a = tarfile.open(__a )
tar_file.extractall(__a , members=TarExtractor.safemembers(__a , __a ) )
tar_file.close()
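# Example of what the guard above blocks (hypothetical archive entries): a member
# named "../../etc/passwd" fails badpath() because its resolved path escapes the
# extraction directory, and a symlink pointing outside the base fails badlink();
# both are logged and skipped rather than extracted.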
class GzipExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\x1F\x8B"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
with gzip.open(__a , "rb" ) as gzip_file:
with open(__a , "wb" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class ZipExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : Union[Path, str] , __lowerCamelCase : bytes = b"" ) -> bool:
if super().is_extractable(__a , magic_number=__a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__a , "rb" ) as fp:
a = _EndRecData(__a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
a = fp.read(__a ) # CD is where we expect it to be
if len(__a ) == sizeCentralDir:
a = struct.unpack(__a , __a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
os.makedirs(__a , exist_ok=__a )
with zipfile.ZipFile(__a , "r" ) as zip_file:
zip_file.extractall(__a )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
with lzma.open(__a ) as compressed_file:
with open(__a , "wb" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class RarExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__a , exist_ok=__a )
a = rarfile.RarFile(__a )
rf.extractall(__a )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
a = zstd.ZstdDecompressor()
with open(__a , "rb" ) as ifh, open(__a , "wb" ) as ofh:
dctx.copy_stream(__a , __a )
class BzipaExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\x42\x5A\x68"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
        with bz2.open(__a , "rb" ) as compressed_file:
with open(__a , "wb" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class SevenZipExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(__a , exist_ok=__a )
        with py7zr.SevenZipFile(__a , "r" ) as archive:
archive.extractall(__a )
class LzaExtractor(MagicNumberBaseExtractor):
"""simple docstring"""
    magic_numbers = [b"\x04\x22\x4D\x18"]
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(__a , "rb" ) as compressed_file:
with open(__a , "wb" ) as extracted_file:
shutil.copyfileobj(__a , __a )
class Extractor:
"""simple docstring"""
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __UpperCAmelCase ( cls : Optional[int] ) -> List[str]:
return max(
len(__a )
for extractor in cls.extractors.values()
if issubclass(__a , __a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Union[Path, str] , __lowerCamelCase : int ) -> List[Any]:
try:
return MagicNumberBaseExtractor.read_magic_number(__a , magic_number_length=__a )
except OSError:
return b""
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , __lowerCamelCase : Union[Path, str] , __lowerCamelCase : bool = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=__a , )
a = cls.infer_extractor_format(__a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __UpperCAmelCase ( cls : Tuple , __lowerCamelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
a = cls._get_magic_number_max_length()
a = cls._read_magic_number(__a , __a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__a , magic_number=__a ):
return extractor_format
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Union[Path, str] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(__a ) , exist_ok=__a )
# Prevent parallel extractions
a = str(Path(__a ).with_suffix(".lock" ) )
with FileLock(__a ):
shutil.rmtree(__a , ignore_errors=__a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__a , __a ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=__a , )
                a = extractor if extractor != "deprecated" else extractor_format
else:
a = cls.extractors[extractor_format]
return extractor.extract(__a , __a )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=__a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__a ):
return extractor.extract(__a , __a )
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Turn a list of sentences into a flat list of characters, optionally delimited."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
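# Worked arithmetic for the formula in _DESCRIPTION: comparing prediction "axc"
# against reference "abc" gives S=1, D=0, I=0 over N=3 reference characters, so
# CER = (1 + 0 + 0) / 3 ≈ 0.333; insertions can push the score above 1.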
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Extract the pet-breed label from a filename such as 'shih-tzu_91.jpg'."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
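# Example: extract_label("images/shih-tzu_91.jpg") -> "shih-tzu", since the
# regex strips the trailing "_<number>.jpg" from the basename.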
class PetsDataset(Dataset):
"""simple docstring"""
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
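    # Launch sketch (editor's note): a training script like this is normally
    # started through the accelerate CLI rather than plain python, e.g.
    #   accelerate launch cv_example.py --data_dir ./images --with_tracking
    # where --data_dir points at a folder of .jpg files whose names encode the
    # class label (the script filename here is illustrative).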
| 662 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE_ : str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
SCREAMING_SNAKE_CASE_ : int = ["""accelerate""", """launch"""]
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path.home() / """.cache/huggingface/accelerate"""
SCREAMING_SNAKE_CASE_ : str = """default_config.yaml"""
SCREAMING_SNAKE_CASE_ : List[str] = config_folder / config_file
SCREAMING_SNAKE_CASE_ : List[str] = config_folder / """_default_config.yaml"""
SCREAMING_SNAKE_CASE_ : List[Any] = Path("""tests/test_configs""" )
@classmethod
def __UpperCAmelCase ( cls : Tuple ) -> Tuple:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] ) -> List[Any]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __UpperCAmelCase ( self : Tuple ) -> str:
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=A__ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(A__ ), self.test_file_path] , env=os.environ.copy() )
def __UpperCAmelCase ( self : Any ) -> Tuple:
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """test-tpu"""
SCREAMING_SNAKE_CASE_ : str = """us-central1-a"""
SCREAMING_SNAKE_CASE_ : Any = """ls"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""accelerate""", """tpu-config"""]
SCREAMING_SNAKE_CASE_ : Dict = """cd /usr/share"""
SCREAMING_SNAKE_CASE_ : str = """tests/test_samples/test_command_file.sh"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """Running gcloud compute tpus tpu-vm ssh"""
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , A__ , )
def __UpperCAmelCase ( self : Tuple ) -> Any:
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , A__ , )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=A__ )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , A__ , )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , A__ , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , A__ , )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , A__ , )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , A__ , )
def __UpperCAmelCase ( self : Tuple ) -> Any:
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , A__ , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=A__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , A__ , )
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'Wav2Vec2FeatureExtractor'
SCREAMING_SNAKE_CASE_ : Any = 'AutoTokenizer'
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ) -> Tuple:
super().__init__(__lowerCamelCase , __lowerCamelCase )
a = self.feature_extractor
a = False
@classmethod
def __UpperCAmelCase ( cls : int , __lowerCamelCase : List[Any] , **__lowerCamelCase : int ) -> Dict:
try:
return super().from_pretrained(__lowerCamelCase , **__lowerCamelCase )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , __lowerCamelCase , )
a = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = WavaVecaCTCTokenizer.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
return cls(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
def __call__( self : Dict , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[Any] ) -> List[Any]:
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
a = kwargs.pop("raw_speech" )
else:
a = kwargs.pop("audio" , __lowerCamelCase )
a = kwargs.pop("sampling_rate" , __lowerCamelCase )
a = kwargs.pop("text" , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
a = args[0]
a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
a = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
a = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a = encodings["input_ids"]
return inputs
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ) -> Optional[int]:
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase , **__lowerCamelCase )
a = kwargs.pop("input_features" , __lowerCamelCase )
a = kwargs.pop("labels" , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
a = args[0]
a = args[1:]
if input_features is not None:
a = self.feature_extractor.pad(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
if labels is not None:
a = self.tokenizer.pad(__lowerCamelCase , **__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a = labels["input_ids"]
return input_features
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , *__lowerCamelCase : str , **__lowerCamelCase : str ) -> int:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def __UpperCAmelCase ( self : Dict ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
a = True
a = self.tokenizer
yield
a = self.feature_extractor
a = False
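# Usage sketch (editor's note): in transformers this processor is exported as
# Wav2Vec2Processor; the checkpoint name below is just an example.
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids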
| 701 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
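    # Sanity check (editor's sketch): Python's built-in three-argument pow()
    # performs the same modular exponentiation as _modexpt above, so the two
    # should always agree for a base smaller than the modulus.
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)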
| 662 | 0 |
'''simple docstring'''
import operator as op
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
a = lambda A, A : int(x / y ) # noqa: E731 integer division operation
a = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ), "Action".center(12 ), "Stack", sep=" | " )
print("-" * (30 + len(lowercase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowercase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ("push(" + x + ")").ljust(12 ), ",".join(lowercase__ ), sep=" | " )
else:
a = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + b + ")").ljust(12 ), ",".join(lowercase__ ), sep=" | " )
a = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + a + ")").ljust(12 ), ",".join(lowercase__ ), sep=" | " )
stack.append(
str(opr[x](int(lowercase__ ), int(lowercase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ("push(" + a + x + b + ")").ljust(12 ), ",".join(lowercase__ ), sep=" | ", )
return int(stack[0] )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
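    # Worked example (editor's note): for the postfix input "5 6 9 * +" the
    # stack evaluates 6 * 9 = 54 and then 5 + 54 = 59, so solve() returns 59.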
| 702 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
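    # A few illustrative calls (editor's sketch). Identical strings always
    # score 1.0; 'martha'/'marhta' is the classic transposition example.
    print(jaro_winkler('hello', 'hello'))   # 1.0
    print(jaro_winkler('martha', 'marhta'))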
| 662 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__magic_name__ : Any = TypeVar('T')
__magic_name__ : str = TypeVar('U')
class snake_case__ (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> List[Any]:
a = key
a = val
a = None
a = None
def __repr__( self : Tuple ) -> str:
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class snake_case__ (Generic[T, U] ):
"""simple docstring"""
def __init__( self : Any ) -> None:
a = DoubleLinkedListNode(lowercase__ , lowercase__ )
a = DoubleLinkedListNode(lowercase__ , lowercase__ )
        a , a = self.rear, self.head
def __repr__( self : Dict ) -> str:
a = ['''DoubleLinkedList''']
a = self.head
while node.next is not None:
rep.append(str(lowercase__ ) )
a = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase__ )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str ) -> None:
a = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
a = node
a = previous
a = node
a = self.rear
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
a = node.next
a = node.prev
a = None
a = None
return node
class snake_case__ (Generic[T, U] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict ) -> Union[str, Any]:
a = DoubleLinkedList()
a = capacity
a = 0
a = 0
a = 0
a = {}
def __repr__( self : str ) -> str:
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : List[Any] , __lowerCamelCase : str ) -> bool:
return key in self.cache
def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict ) -> U | None:
if key in self.cache:
self.hits += 1
a = self.cache[key]
a = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase__ )
return node.val
self.miss += 1
return None
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
a = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(lowercase__ ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
a = DoubleLinkedListNode(lowercase__ , lowercase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
a = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
a = value
self.list.add(lowercase__ )
@classmethod
def __UpperCAmelCase ( cls : int , __lowerCamelCase : str = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(__lowerCamelCase : int ) -> Callable[..., U]:
def cache_decorator_wrapper(*__lowerCamelCase : List[str] ) -> U:
if func not in cls.decorator_function_to_instance_map:
a = LRUCache(lowercase__ )
a = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
a = func(*lowercase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase__ , "cache_info" , lowercase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
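    # Usage sketch (editor's note): the class-level decorator memoizes a
    # single-argument function with a bounded LRU cache, much like
    # functools.lru_cache. "LRUCache" below stands for the cache class defined
    # above (its name is scrambled in this dump), so the example stays commented:
    #
    # @LRUCache.decorator(100)
    # def fib(num: int) -> int:
    #     return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
    #
    # print(fib(30))           # 832040
    # print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)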
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
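    # Editor's note on the search bound: a d-digit number can contribute at
    # most d * 9**5 = d * 59049 to the digit fifth-power sum, and
    # 7 * 59049 = 413343 has only six digits, so no number with seven or more
    # digits can equal its own sum; hence the 1_000_000 upper limit above.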
| 662 | 0 |
from copy import deepcopy
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Any = None , __lowerCamelCase : str = None ) -> List[str]:
if arr is None and size is not None:
a = size
a = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("Either arr or size must be specified" )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> str:
a = len(lowercase__ )
a = deepcopy(lowercase__ )
for i in range(1 , self.size ):
a = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
a = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[int]:
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : str ) -> Dict:
return index - (index & (-index))
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ) -> Any:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
a = self.next_(lowercase__ )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] ) -> int:
if right == 0:
return 0
a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
a = self.prev(lowercase__ )
return result
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> str:
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Tuple ) -> Optional[int]:
return self.query(lowercase__ , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] ) -> Dict:
value -= self.tree[0]
if value < 0:
return -1
a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
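    # Usage sketch (editor's note): the class above is a Fenwick / binary
    # indexed tree (its name is scrambled in this dump; call it
    # BinaryIndexedTree here). With the underlying array [1, 2, 3, 4, 5]:
    #   tree = BinaryIndexedTree([1, 2, 3, 4, 5])
    #   tree.prefix(3)    # 1 + 2 + 3 = 6
    #   tree.query(1, 4)  # 2 + 3 + 4 = 9
    #   tree.add(2, 10)   # underlying array becomes [1, 2, 13, 4, 5]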
| 704 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class snake_case__ (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = """vit_msn"""
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : int=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=1e-06 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Union[str, Any]=True , **__lowerCamelCase : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(**__snake_case )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
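# Usage sketch (editor's note): in transformers this configuration is exported
# as ViTMSNConfig; every argument falls back to the defaults above.
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   config.hidden_size  # 768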
| 705 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
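    # Example (editor's note): cocktail_shaker_sort([4, 5, 2, 1, 2]) returns
    # [1, 2, 2, 4, 5]; the alternating forward/backward passes let small
    # values near the end of the list move to the front in a single sweep.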
| 662 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__lowerCAmelCase : Dict = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def __magic_name__ ( A : str ):
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
__lowerCAmelCase : int = parser.parse_args()
if args.check_lib:
__lowerCAmelCase : List[Any] = importlib.import_module('transformers')
__lowerCAmelCase : Dict = Path(transformers_module.__file__).parent
else:
__lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 706 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__ ( A : Tuple ):
'''simple docstring'''
a = model.config
a = DonutSwinConfig(
image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
a = MBartConfig(
is_decoder=lowercase_, is_encoder_decoder=lowercase_, add_cross_attention=lowercase_, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
model.decoder.tokenizer ), scale_embedding=lowercase_, add_final_layer_norm=lowercase_, )
return encoder_config, decoder_config
def __magic_name__ ( A : Optional[Any] ):
'''simple docstring'''
if "encoder.model" in name:
a = name.replace("encoder.model", "encoder" )
if "decoder.model" in name:
a = name.replace("decoder.model", "decoder" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a = name.replace("patch_embed.norm", "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
a = "encoder." + name
if "attn.proj" in name:
a = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name and "mask" not in name:
a = name.replace("attn", "attention.self" )
if "norm1" in name:
a = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2", "output.dense" )
if name == "encoder.norm.weight":
a = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
a = "encoder.layernorm.bias"
return name
def __magic_name__ ( A : str, A : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = int(key_split[5] )
a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
a = val
return orig_state_dict
def __magic_name__ ( A : List[Any], A : Union[str, Any]=None, A : Tuple=False ):
'''simple docstring'''
a = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
    a , a = get_configs(lowercase_ )
a = DonutSwinModel(lowercase_ )
a = MBartForCausalLM(lowercase_ )
a = VisionEncoderDecoderModel(encoder=lowercase_, decoder=lowercase_ )
model.eval()
a = original_model.state_dict()
a = convert_state_dict(lowercase_, lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
a = load_dataset("hf-internal-testing/example-documents" )
a = dataset["test"][0]["image"].convert("RGB" )
a = XLMRobertaTokenizerFast.from_pretrained(lowercase_, from_slow=lowercase_ )
a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] )
a = DonutProcessor(lowercase_, lowercase_ )
a = processor(lowercase_, return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a = "When is the coffee break?"
a = task_prompt.replace("{user_input}", lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
a = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
a = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
a = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
a = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
a = "hello world"
else:
raise ValueError("Model name not supported" )
a = original_model.decoder.tokenizer(lowercase_, add_special_tokens=lowercase_, return_tensors="pt" )[
"input_ids"
]
a = original_model.encoder.model.patch_embed(lowercase_ )
a = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_, lowercase_, atol=1E-3 )
# verify encoder hidden states
a = original_model.encoder(lowercase_ )
a = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_, lowercase_, atol=1E-2 )
# verify decoder hidden states
a = original_model(lowercase_, lowercase_, lowercase_ ).logits
a = model(lowercase_, decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_, lowercase_, atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1], commit_message="Update model" )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
__lowerCAmelCase : Any = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
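    # Invocation sketch (editor's note; the script filename is illustrative):
    #   python convert_donut_to_pytorch.py \
    #       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
    #       --pytorch_dump_folder_path ./donut-converted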
| 707 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
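    # Invocation sketch (editor's note): per the error message above this is
    # normally run via `make style`, or directly as
    #   python utils/sort_auto_mappings.py --check_only
    # (the utils/ path is how transformers ships this script; illustrative here).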
| 662 | 0 |
def __magic_name__ ( num : int ):
    '''simple docstring'''
    if isinstance(num, float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num, str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
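def _demo_reformer_roundtrip(text: str = "Crime and Punishment"):
    # Hedged usage sketch (added for illustration): the class above mirrors
    # transformers' ReformerTokenizer; a round trip with the released checkpoint
    # (network access assumed) decodes back to the original text.
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok(text).input_ids
    return tok.decode(ids)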
| 662 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 709 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
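def _can_run(need: list[int], available: list[int]) -> bool:
    # Hedged sketch (added for illustration): the core safety test of the Banker's
    # algorithm above, reduced to plain lists -- a process may execute only if its
    # remaining need fits entirely within the currently available resources.
    return all(n <= avail for n, avail in zip(need, available))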
| 662 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class snake_case__ (UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = """microsoft/speecht5_tts"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
SCREAMING_SNAKE_CASE_ : Dict = """text_reader"""
SCREAMING_SNAKE_CASE_ : List[str] = SpeechTaProcessor
SCREAMING_SNAKE_CASE_ : str = SpeechTaForTextToSpeech
SCREAMING_SNAKE_CASE_ : str = SpeechTaHifiGan
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""text"""]
SCREAMING_SNAKE_CASE_ : Any = ["""audio"""]
def __UpperCAmelCase ( self : Tuple ) -> str:
if self.post_processor is None:
a = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=None ) -> Optional[Any]:
a = self.pre_processor(text=_a , return_tensors="pt" , truncation=_a )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
a = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
a = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCAmelCase ( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
with torch.no_grad():
return self.model.generate_speech(**_a )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> List[str]:
with torch.no_grad():
return self.post_processor(_a ).cpu().detach()
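def _demo_text_reader(text: str = "Hello world"):
    # Hedged usage sketch (added for illustration): upstream this class ships as
    # TextToSpeechTool in transformers.tools; through the agents Tool API it is
    # called with plain text and returns a waveform tensor (the SpeechT5
    # checkpoints are downloaded on first use).
    from transformers.tools import TextToSpeechTool

    tool = TextToSpeechTool()
    return tool(text)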
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
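def _demo_interleave():
    # Hedged usage sketch (added for illustration): mixing two in-memory datasets
    # with sampling probabilities, via the public `datasets.interleave_datasets`
    # wrapper around the logic implemented above.
    from datasets import Dataset, interleave_datasets

    d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
    d2 = Dataset.from_dict({"text": ["x", "y"]})
    mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
    return mixed["text"]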
| 662 | 0 |
import math


def is_prime ( number : int ):
    '''simple docstring'''
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number ) + 1 ), 2 )
    return not any(not number % i for i in odd_numbers )


def next_prime ( value : int, factor : int=1, **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs )
    return value
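def _demo_next_prime():
    # Hedged demo (added for illustration): next_prime scales its input by `factor`,
    # then walks upward (or downward with desc=True) to a prime; an input that is
    # already prime is skipped in favor of the next one.
    assert is_prime(13)
    assert next_prime(14) == 17  # 14 -> 15 -> 16 -> 17
    assert next_prime(5) == 7    # 5 is prime, so the search restarts from 6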
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
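def _demo_bigbird_special_tokens():
    # Hedged usage sketch (added for illustration): a single sequence is wrapped as
    # [CLS] ... [SEP] and a pair as [CLS] a [SEP] b [SEP], which is exactly what
    # build_inputs_with_special_tokens above produces (network access assumed).
    from transformers import BigBirdTokenizerFast

    tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    return tok.convert_ids_to_tokens(tok("hello world").input_ids)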
| 662 | 0 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass ( frequency : int, samplerate : int, q_factor : float = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt


def make_highpass ( frequency : int, samplerate : int, q_factor : float = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt


def make_bandpass ( frequency : int, samplerate : int, q_factor : float = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt


def make_allpass ( frequency : int, samplerate : int, q_factor : float = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2] )
    return filt


def make_peak ( frequency : int, samplerate : int, gain_db : float, q_factor : float = 1 / sqrt(2 ), ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt


def make_lowshelf ( frequency : int, samplerate : int, gain_db : float, q_factor : float = 1 / sqrt(2 ), ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt


def make_highshelf ( frequency : int, samplerate : int, gain_db : float, q_factor : float = 1 / sqrt(2 ), ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
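def _demo_peak_filter():
    # Hedged usage sketch (added for illustration): a +6 dB peaking EQ at 1 kHz for
    # 48 kHz audio, applied sample-by-sample; IIRFilter.process() is assumed from
    # the companion audio_filters.iir_filter module.
    filt = make_peak(1_000, 48_000, gain_db=6.0)
    return [filt.process(sample) for sample in (0.0, 1.0, 0.0, -1.0)]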
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def parse_args ():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def tokenize_function ( tokenizer ):
    '''simple docstring'''
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples ( tokenized_data ):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized_example = example.SerializeToString()
        records.append(serialized_example )
    return records
def main ( args ):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ), args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir, args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir, args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset ), args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename, records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
        print(F"""Total {args.split} records: {total_records}""", file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 662 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ (_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
class snake_case__ (_A , _A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
@register_to_config
def __init__( self : int , __lowerCamelCase : Optional[int] = 3 , __lowerCamelCase : Dict = 3 , __lowerCamelCase : Tuple = ("DownEncoderBlock2D",) , __lowerCamelCase : Optional[int] = ("UpDecoderBlock2D",) , __lowerCamelCase : Optional[int] = (64,) , __lowerCamelCase : Tuple = 1 , __lowerCamelCase : List[str] = "silu" , __lowerCamelCase : Any = 4 , __lowerCamelCase : Union[str, Any] = 32 , __lowerCamelCase : Optional[Any] = 32 , __lowerCamelCase : Optional[Any] = 0.18_215 , ) -> int:
super().__init__()
# pass init params to Encoder
a = Encoder(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , down_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , act_fn=__lowerCamelCase , norm_num_groups=__lowerCamelCase , double_z=__lowerCamelCase , )
# pass init params to Decoder
a = Decoder(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , up_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , norm_num_groups=__lowerCamelCase , act_fn=__lowerCamelCase , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
a = False
a = False
# only relevant if vae tiling is enabled
a = self.config.sample_size
a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
a = 0.25
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any=False ) -> str:
if isinstance(__lowerCamelCase , (Encoder, Decoder) ):
a = value
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[Any] = True ) -> List[str]:
a = use_tiling
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
self.enable_tiling(__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = True
def __UpperCAmelCase ( self : str ) -> Tuple:
a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self : int ) -> Dict[str, AttentionProcessor]:
a = {}
def fn_recursive_add_processors(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if hasattr(__lowerCamelCase , "set_processor" ):
a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , __lowerCamelCase , __lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return processors
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = len(self.attn_processors.keys() )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(__lowerCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(__lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : str ):
if hasattr(__lowerCamelCase , "set_processor" ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
module.set_processor(__lowerCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , __lowerCamelCase , __lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : int = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__lowerCamelCase , return_dict=__lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
a = [self.encoder(__lowerCamelCase ) for x_slice in x.split(1 )]
a = torch.cat(__lowerCamelCase )
else:
a = self.encoder(__lowerCamelCase )
a = self.quant_conv(__lowerCamelCase )
a = DiagonalGaussianDistribution(__lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__lowerCamelCase , return_dict=__lowerCamelCase )
a = self.post_quant_conv(__lowerCamelCase )
a = self.decoder(__lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
a = [self._decode(__lowerCamelCase ).sample for z_slice in z.split(1 )]
a = torch.cat(__lowerCamelCase )
else:
a = self._decode(__lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ) -> Dict:
a = min(a.shape[2] , b.shape[2] , __lowerCamelCase )
for y in range(__lowerCamelCase ):
a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ) -> Dict:
a = min(a.shape[3] , b.shape[3] , __lowerCamelCase )
for x in range(__lowerCamelCase ):
a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any = True ) -> AutoencoderKLOutput:
a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_latent_min_size * self.tile_overlap_factor )
a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
a = []
for i in range(0 , x.shape[2] , __lowerCamelCase ):
a = []
for j in range(0 , x.shape[3] , __lowerCamelCase ):
a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
a = self.encoder(__lowerCamelCase )
a = self.quant_conv(__lowerCamelCase )
row.append(__lowerCamelCase )
rows.append(__lowerCamelCase )
a = []
for i, row in enumerate(__lowerCamelCase ):
a = []
for j, tile in enumerate(__lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __lowerCamelCase , __lowerCamelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __lowerCamelCase , __lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__lowerCamelCase , dim=3 ) )
a = torch.cat(__lowerCamelCase , dim=2 )
a = DiagonalGaussianDistribution(__lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_sample_min_size * self.tile_overlap_factor )
a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
a = []
for i in range(0 , z.shape[2] , __lowerCamelCase ):
a = []
for j in range(0 , z.shape[3] , __lowerCamelCase ):
a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
a = self.post_quant_conv(__lowerCamelCase )
a = self.decoder(__lowerCamelCase )
row.append(__lowerCamelCase )
rows.append(__lowerCamelCase )
a = []
for i, row in enumerate(__lowerCamelCase ):
a = []
for j, tile in enumerate(__lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __lowerCamelCase , __lowerCamelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __lowerCamelCase , __lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__lowerCamelCase , dim=3 ) )
a = torch.cat(__lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int = False , __lowerCamelCase : str = True , __lowerCamelCase : List[str] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
a = sample
a = self.encode(__lowerCamelCase ).latent_dist
if sample_posterior:
a = posterior.sample(generator=__lowerCamelCase )
else:
a = posterior.mode()
a = self.decode(__lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
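def _demo_blend_h(a, b, blend_extent: int):
    # Hedged sketch (added for illustration): the seam blending used by the tiled
    # encode/decode above is a plain linear cross-fade over the overlapping columns;
    # this version clones `b` instead of mutating it in place (torch is imported at
    # the top of this module).
    extent = min(a.shape[3], b.shape[3], blend_extent)
    out = b.clone()
    for x in range(extent):
        out[:, :, :, x] = a[:, :, :, -extent + x] * (1 - x / extent) + b[:, :, :, x] * (x / extent)
    return out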
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
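def _mean_chars_per_token(dataset) -> float:
    # Hedged helper (added for illustration): the per-example characters-per-token
    # ratio produced by `tokenize` above, averaged over the whole dataset; a rough
    # compression figure for the tokenizer (column name matches the map function).
    return sum(dataset["ratio_char_token"] ) / len(dataset )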
| 662 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( A : Union[str, Any], A : Tuple, A : int ):
'''simple docstring'''
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def _UpperCamelCase ( A : Union[str, Any], A : List[Any], A : Dict, A : Optional[int]="attention" ):
'''simple docstring'''
a = a = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
a = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] )
a = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
a = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] )
a = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
a = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] )
a = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
a = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _UpperCamelCase ( A : Tuple, A : Any, A : int, A : Optional[int]=False ):
'''simple docstring'''
if split_mlp_wi:
a = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
a = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
a = (wi_a, wi_a)
else:
a = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
a = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def _UpperCamelCase ( A : Tuple, A : List[str], A : Any, A : Any ):
'''simple docstring'''
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def _UpperCamelCase ( A : List[str], *, A : int, A : Optional[Any], A : List[Any] = False ):
'''simple docstring'''
a = traverse_util.flatten_dict(variables["target"] )
a = {"/".join(a__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:", a__ )
a = collections.OrderedDict()
# Shared embeddings.
a = old["token_embedder/embedding"]
# Encoder.
for i in range(a__ ):
# Block i, layer 0 (Self Attention).
a = tax_layer_norm_lookup(a__, a__, "encoder", "pre_attention_layer_norm" )
a , a , a , a = tax_attention_lookup(a__, a__, "encoder", "attention" )
a = layer_norm
a = k.T
a = o.T
a = q.T
a = v.T
# Block i, layer 1 (MLP).
a = tax_layer_norm_lookup(a__, a__, "encoder", "pre_mlp_layer_norm" )
a , a = tax_mlp_lookup(a__, a__, "encoder", a__ )
a = layer_norm
if split_mlp_wi:
a = wi[0].T
a = wi[1].T
else:
a = wi.T
a = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a = tax_relpos_bias_lookup(
a__, a__, "encoder" ).T
a = old["encoder/encoder_norm/scale"]
if not scalable_attention:
a = tax_relpos_bias_lookup(
a__, 0, "encoder" ).T
a = tax_relpos_bias_lookup(
a__, 0, "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(a__ ):
# Block i, layer 0 (Self Attention).
a = tax_layer_norm_lookup(a__, a__, "decoder", "pre_self_attention_layer_norm" )
a , a , a , a = tax_attention_lookup(a__, a__, "decoder", "self_attention" )
a = layer_norm
a = k.T
a = o.T
a = q.T
a = v.T
# Block i, layer 1 (Cross Attention).
a = tax_layer_norm_lookup(a__, a__, "decoder", "pre_cross_attention_layer_norm" )
a , a , a , a = tax_attention_lookup(a__, a__, "decoder", "encoder_decoder_attention" )
a = layer_norm
a = k.T
a = o.T
a = q.T
a = v.T
# Block i, layer 2 (MLP).
a = tax_layer_norm_lookup(a__, a__, "decoder", "pre_mlp_layer_norm" )
a , a = tax_mlp_lookup(a__, a__, "decoder", a__ )
a = layer_norm
if split_mlp_wi:
a = wi[0].T
a = wi[1].T
else:
a = wi.T
a = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a = tax_relpos_bias_lookup(a__, a__, "decoder" ).T
a = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a = old["decoder/logits_dense/kernel"].T
return new
def _UpperCamelCase ( A : List[str], A : Dict ):
'''simple docstring'''
a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
a = state_dict["shared.weight"]
return state_dict
def _UpperCamelCase ( A : Any, A : Union[str, Any], A : Any, A : int, A : List[str] ):
'''simple docstring'''
a = checkpoints.load_tax_checkpoint(a__ )
a = convert_tax_to_pytorch(
a__, num_layers=config.num_layers, is_encoder_only=a__, scalable_attention=a__ )
a = make_state_dict(a__, a__ )
model.load_state_dict(a__, strict=a__ )
def _UpperCamelCase ( A : Optional[Any], A : str, A : Optional[Any], A : Optional[int] = False, A : int = False, ):
'''simple docstring'''
a = MTaConfig.from_json_file(a__ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a = UMTaEncoderModel(a__ )
else:
a = UMTaForConditionalGeneration(a__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a__, a__, a__, a__, a__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(a__ )
# Verify that we can load the checkpoint.
model.from_pretrained(a__ )
print("Done" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
__lowerCAmelCase : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
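def _demo_fold_heads():
    # Hedged sketch (added for illustration): the attention lookups above fold the
    # (num_heads, head_dim) axes of a T5X kernel into a single matrix, like so
    # (numpy is imported at the top of this module):
    k = np.zeros((512, 8, 64) )  # (d_model, num_heads, head_dim)
    folded = k.reshape(k.shape[0], k.shape[1] * k.shape[2] )
    return folded.shape  # (512, 512), ready to transpose into a torch Linear weight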
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']


class TokenizedDataset(IterableDataset ):
    """simple docstring"""

    def __init__( self, tokenizer, dataset, n_tasks=None, n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria ):
    """simple docstring"""

    def __init__( self, start_length, eof_strings, tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self, input_ids, scores, **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )


def remove_last_block ( string : str ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ), string )
    # last string should be ""
    return "".join(string_list[:-2] )


def complete_code ( accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_sequence in zip(generated_tasks, generated_tokens ):
                gen_token_dict[task].append(generated_sequence )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens


def main ():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader )
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["test"][task]["test"]
            entry_point = F"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers )
        print(F"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file, "w" ) as fp:
            json.dump(pass_at_k, fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
from math import pow, sqrt
def validate( *A : float ):
    '''simple docstring'''
    a = len(A ) > 0 and all(value > 0.0 for value in A )
    return a
def __magic_name__ ( molar_mass_a : float, molar_mass_b : float ):
    '''simple docstring'''
    return (
        round(sqrt(molar_mass_b / molar_mass_a ), 6 )
        if validate(molar_mass_a, molar_mass_b )
        else ValueError("Input Error: Molar mass values must be greater than 0." )
    )
def __magic_name__ ( effusion_rate : float, molar_mass_a : float, molar_mass_b : float ):
    '''simple docstring'''
    return (
        round(effusion_rate * sqrt(molar_mass_b / molar_mass_a ), 6 )
        if validate(effusion_rate, molar_mass_a, molar_mass_b )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )
def __magic_name__ ( effusion_rate : float, molar_mass_a : float, molar_mass_b : float ):
    '''simple docstring'''
    return (
        round(effusion_rate / sqrt(molar_mass_b / molar_mass_a ), 6 )
        if validate(effusion_rate, molar_mass_a, molar_mass_b )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )
def __magic_name__ ( molar_mass : float, effusion_rate_a : float, effusion_rate_b : float ):
    '''simple docstring'''
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b, 2 ), 6 )
        if validate(molar_mass, effusion_rate_a, effusion_rate_b )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )
def __magic_name__ ( molar_mass : float, effusion_rate_a : float, effusion_rate_b : float ):
    '''simple docstring'''
    return (
        round(pow(effusion_rate_a / effusion_rate_b, 2 ) / molar_mass, 6 )
        if validate(molar_mass, effusion_rate_a, effusion_rate_b )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )
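# Worked example (hypothetical values): by Graham's law the effusion-rate ratio of two gases
# is sqrt(molar_mass_b / molar_mass_a), so for hydrogen (~2.016 g/mol) against oxygen
# (~31.999 g/mol) the lighter gas effuses about sqrt(31.999 / 2.016) ≈ 3.98 times faster.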
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class snake_case__ (PretrainedConfig ):
    """simple docstring"""
    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , num_buckets=3_20 , max_bucket_distance=8_00 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
a = conv_bias
a = num_buckets
a = max_bucket_distance
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layerdrop
a = layer_norm_eps
a = initializer_range
a = num_ctc_classes
a = vocab_size
a = do_stable_layer_norm
a = use_weighted_layer_sum
a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
# parameters for pretraining with codevector quantized representations
a = num_codevectors_per_group
a = num_codevector_groups
a = contrastive_logits_temperature
a = num_negatives
a = codevector_dim
a = proj_codevector_dim
a = diversity_loss_weight
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# adapter
a = add_adapter
a = adapter_kernel_size
a = adapter_stride
a = num_adapter_layers
a = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
a = xvector_output_dim
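    # The property below gives the overall downsampling factor of the convolutional feature
    # encoder: the product of all conv strides (5*2*2*2*2*2*2 = 320 with the defaults above).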
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
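    # Intentionally left as a no-op: this presumably overrides a test from the common tester
    # that does not apply to this tokenizer.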
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
from __future__ import annotations
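# Trial division: repeatedly divide n by the smallest integer i >= 2 that divides it;
# every i that succeeds here is necessarily prime.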
def __magic_name__ ( n : int ):
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
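# Example: for n = 360 the function returns [2, 2, 2, 3, 3, 5].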
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
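# map_nested applies a function over arbitrarily nested lists/dicts; these tests exercise it
# under the joblib-spark parallel backend and check that unsupported backends raise.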
def add_one( i ):  # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __magic_name__ ( ):
    '''simple docstring'''
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one, lst, num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one, lst, num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1] )
def __magic_name__ ( num_proc ):
    '''simple docstring'''
    sa = [1, 2]
    sb = {"""a""": 1, """b""": 2}
    sc = {"""a""": [1, 2], """b""": [3, 4]}
    sd = {"""a""": {"""1""": 1}, """b""": 2}
    se = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {"""a""": 2, """b""": 3}
    expected_map_nested_sc = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_sd = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_se = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one, sa, num_proc=num_proc ) == expected_map_nested_sa
        assert map_nested(add_one, sb, num_proc=num_proc ) == expected_map_nested_sb
        assert map_nested(add_one, sc, num_proc=num_proc ) == expected_map_nested_sc
        assert map_nested(add_one, sd, num_proc=num_proc ) == expected_map_nested_sd
        assert map_nested(add_one, se, num_proc=num_proc ) == expected_map_nested_se
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
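# Standard transformer-style sinusoidal embeddings: each timestep is mapped to a vector of
# sines and cosines at geometrically spaced frequencies between min_timescale and max_timescale.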
def get_sinusoidal_embeddings( timesteps : jnp.ndarray, embedding_dim : int, freq_shift : float = 1, min_timescale : float = 1, max_timescale : float = 1.0E4, flip_sin_to_cos : bool = False, scale : float = 1.0, ):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps, 1 ) * jnp.expand_dims(inv_timescales, 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )], axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )], axis=1 )
    signal = jnp.reshape(signal, [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
    time_embed_dim : int = 32
    dtype : jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(temb )
        return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
    dim : int = 32
    flip_sin_to_cos : bool = False
    freq_shift : float = 1
    @nn.compact
    def __call__( self , timesteps ):
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 0 |
from PIL import Image
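# Global mean thresholding: compute the average pixel intensity of a greyscale image,
# then binarize every pixel against that mean.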
def mean_threshold( image : Image ):
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
__lowerCAmelCase : Dict = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
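    # Builds a small list of random RGB PIL images to stand in for real inputs in the tests below.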
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
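# Scrapes the three headline counters (cases, deaths, recovered) from the Worldometers
# landing page with a single XPath query.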
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats( url : str = "https://www.worldometers.info/coronavirus/" ):
    '''simple docstring'''
    a = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(a ) )
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
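# A file name such as "great_pyrenees_173.jpg" maps to the label "great_pyrenees":
# the trailing index and extension are stripped by the regex below.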
def extract_label( fname ):
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(R"^(.*)_\d+\.jpg$", stem ).groups()[0]
class PetsDataset (Dataset ):
    """simple docstring"""
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__( self ):
        return len(self.file_names )
    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert("RGB" )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config, args ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir", required=True, help="The data folder on disk." )
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args )
if __name__ == "__main__":
    main()
| 662 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase : int = logging.get_logger(__name__)
class snake_case__ (BeitImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
super().setUp()
a = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
return CanineTokenizer.from_pretrained("google/canine-s" )
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Dict ) -> List[str]:
a = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
a = 10_24
return tokenizer
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.canine_tokenizer
a = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
a = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
a = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = self.canine_tokenizer
a = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
a = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = self.canine_tokenizer
a = [
"What's the weater?",
"It's about 25 degrees.",
]
a = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = " He is very happy, UNwant\u00E9d,running"
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
a = tokenizer.__class__.from_pretrained(__lowerCamelCase )
a = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
a = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
a = tempfile.mkdtemp()
a = " He is very happy, UNwant\u00E9d,running"
a = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a = chr(0Xe0_07 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
a = tokenizer.__class__.from_pretrained(__lowerCamelCase )
a = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
a , a = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
a = 0Xe0_05
a = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
a = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
a = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
a = chr(0Xe0_05 )
a = chr(0Xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
a = tokenizer.tokenize(__lowerCamelCase )
a = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
a = 0Xe0_06
a = chr(__lowerCamelCase )
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
a = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
a = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
a = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
a = 0Xe0_06
a = chr(__lowerCamelCase )
a = [new_token_a]
a = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a = 0Xe0_07
a = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
a = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
a = "hello world"
if self.space_between_special_tokens:
a = "[CLS] hello world [SEP]"
else:
a = input
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
a = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
a = "a"
a = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
a = 0Xe0_06
a = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
pass
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : Tuple ) -> int:
pass
def __UpperCAmelCase ( self : int ) -> int:
pass
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
pass
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
pass
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
pass
| 701 |
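The tests above rely on CANINE's character-level tokenization: every character maps straight to its Unicode code point, and reserved code points frame the sequence (57344 = 0xE000 and 57345 = 0xE001 appear as the [CLS] and [SEP] ids in the expected list, 0 as padding). A minimal sketch of that encoding, independent of the library:

# Sketch only; the CLS/SEP/PAD values are read off the expected ids in the
# test above, not taken from the library itself.
CLS, SEP, PAD = 0xE000, 0xE001, 0

def encode(text: str, max_length: int) -> list[int]:
    ids = [CLS] + [ord(ch) for ch in text] + [SEP]
    return ids + [PAD] * (max_length - len(ids))

print(encode("Life is like a box of chocolates.", 39)[:6])  # [57344, 76, 105, 102, 101, 32]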
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662 | 0 |
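The pair of functions above computes the last `digits` digits of the power tower base^base^...^base (height levels) by reducing every intermediate exponentiation modulo 10**digits. Python's three-argument pow performs that modular step directly, so an equivalent sketch (a hypothetical helper, not the snippet's own names) is:

# Same recurrence as the snippet above, using the built-in modular pow.
def tower_last_digits(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result

print(tower_last_digits())  # last 8 digits of the 1777 tower of height 1855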
'''simple docstring'''
def __magic_name__ ( A : str ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(SCREAMING_SNAKE_CASE_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod()
| 702 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662 | 0 |
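For readability, here is the same Jaro-Winkler computation with descriptive names; this sketch mirrors the logic above, including its matching window of half the shorter string's length:

def jaro_winkler(s1: str, s2: str) -> float:
    def matched(a: str, b: str) -> str:
        out, limit = [], min(len(a), len(b)) // 2
        for i, ch in enumerate(a):
            lo, hi = max(0, i - limit), min(i + limit + 1, len(b))
            if ch in b[lo:hi]:
                out.append(ch)
                j = b.index(ch)
                b = f"{b[:j]} {b[j + 1:]}"  # blank the match so it is used only once
        return "".join(out)

    m1, m2 = matched(s1, s2), matched(s2, s1)
    m = len(m1)
    if m == 0:
        jaro = 0.0
    else:
        transpositions = sum(c1 != c2 for c1, c2 in zip(m1, m2)) // 2
        jaro = (m / len(s1) + m / len(s2) + (m - transpositions) / m) / 3
    prefix = 0
    for c1, c2 in zip(s1[:4], s2[:4]):  # common prefix, capped at 4 characters
        if c1 != c2:
            break
        prefix += 1
    return jaro + 0.1 * prefix * (1 - jaro)

print(round(jaro_winkler("martha", "marhta"), 4))  # 0.9611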
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Dict = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__magic_name__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
| 662 | 0 |
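The cap of 1000000 in the search above is safe: even seven nines give only 7 * 9**5 = 413343, a six-digit sum, so no number with seven or more digits can equal its digit fifth-power sum. One known solution as a quick check:

# 4150 = 4**5 + 1**5 + 5**5 + 0**5 is among the numbers the search finds.
POW5 = {str(d): d**5 for d in range(10)}
assert sum(POW5[d] for d in str(4150)) == 4150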
from __future__ import annotations
import pandas as pd
def __magic_name__ ( A : list[int], A : list[int], A : int ):
'''simple docstring'''
a = [0] * no_of_processes
a = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__lowercase ):
a = burst_time[i]
a = 0
a = 0
a = 999999999
a = 0
a = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
a = remaining_time[j]
a = j
a = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
a = remaining_time[short]
if minm == 0:
a = 999999999
if remaining_time[short] == 0:
complete += 1
a = False
# Find finish time of current process
a = increment_time + 1
# Calculate waiting time
a = finish_time - arrival_time[short]
a = finar - burst_time[short]
if waiting_time[short] < 0:
a = 0
# Increment time
increment_time += 1
return waiting_time
def __magic_name__ ( A : list[int], A : int, A : list[int] ):
'''simple docstring'''
a = [0] * no_of_processes
for i in range(__lowercase ):
a = burst_time[i] + waiting_time[i]
return turn_around_time
def __magic_name__ ( A : list[int], A : list[int], A : int ):
'''simple docstring'''
a = 0
a = 0
for i in range(__lowercase ):
a = total_waiting_time + waiting_time[i]
a = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("Average turn around time =", total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('Enter how many processes you want to analyze')
__lowerCAmelCase : Any = int(input())
__lowerCAmelCase : Tuple = [0] * no_of_processes
__lowerCAmelCase : Tuple = [0] * no_of_processes
__lowerCAmelCase : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process ' + str(i + 1))
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = map(int, input().split())
__lowerCAmelCase : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : List[str] = burst_time
__lowerCAmelCase : List[str] = no_of_processes
__lowerCAmelCase : Tuple = waiting_time
__lowerCAmelCase : str = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__lowerCAmelCase : Tuple = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 704 |
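The scheduler above is preemptive shortest-remaining-time-first: at every time unit it runs whichever arrived process has the least burst time left. A self-contained restatement on a hypothetical workload shows the preemption:

# Arrival [0, 1, 2] and burst [4, 2, 1]: P1 is preempted at t=1, the shorter
# jobs run to completion first, and P1 finishes last -> waiting times [3, 0, 1].
def srtf_waiting_times(arrival: list[int], burst: list[int]) -> list[int]:
    n, t, done = len(burst), 0, 0
    remaining = list(burst)
    waiting = [0] * n
    while done < n:
        ready = [i for i in range(n) if arrival[i] <= t and remaining[i] > 0]
        if not ready:
            t += 1
            continue
        i = min(ready, key=lambda j: remaining[j])  # least remaining burst runs
        remaining[i] -= 1
        t += 1
        if remaining[i] == 0:
            done += 1
            waiting[i] = t - arrival[i] - burst[i]
    return waiting

print(srtf_waiting_times([0, 1, 2], [4, 2, 1]))  # [3, 0, 1]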
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
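The expected shapes in the tests above follow the shortest-edge resize rule: scale the image so its shorter side equals size["shortest_edge"] and keep the aspect ratio (the longest_edge cap is deliberately out of reach here, as the tester's comment notes). A sketch of the rule, returning (height, width) like the helper:

def expected_size(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(expected_size(400, 300))  # (24, 18)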
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : Tuple = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 0 |
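Cocktail shaker sort is a bidirectional bubble sort: each outer iteration sweeps right-to-left to pull the minimum toward the front, then left-to-right to push the maximum toward the back, stopping early once a full double pass makes no swap. A readable restatement of the passes above:

def cocktail_shaker_sort(items: list[int]) -> list[int]:
    for end in range(len(items) - 1, 0, -1):
        swapped = False
        for j in range(end, 0, -1):  # backward pass: bubble the minimum left
            if items[j] < items[j - 1]:
                items[j], items[j - 1] = items[j - 1], items[j]
                swapped = True
        for j in range(end):  # forward pass: bubble the maximum right
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:  # no swap in either direction: already sorted
            break
    return items

print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]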
def __magic_name__ ( A : Optional[Any], A : int, A : Dict ):
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
        raise ValueError("The length of profit and weight must be the same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit cannot be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight cannot be negative." )
    # Profit gained per kilogram of each item: compute and store profit/weight
    # for every element.
a = [p / w for p, w in zip(lowerCAmelCase__, lowerCAmelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
a = sorted(lowerCAmelCase__ )
# declaring useful variables
a = len(lowerCAmelCase__ )
a = 0
a = 0
a = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a = sorted_profit_by_weight[length - i - 1]
a = profit_by_weight.index(lowerCAmelCase__ )
a = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the fraction weight[index] / weight[index]
            # equals 1, so the full profit is added
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
__lowerCAmelCase : int = [int(x) for x in input('Input profits separated by spaces: ').split()]
__lowerCAmelCase : List[Any] = [int(x) for x in input('Input weights separated by spaces: ').split()]
__lowerCAmelCase : Optional[Any] = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 706 |
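The routine above is the greedy fractional knapsack: items are taken in decreasing profit/weight order, whole while they fit and fractionally for the last one. A hypothetical instance makes the arithmetic concrete:

# Profits [10, 9], weights [4, 6], limit 7 kg: ratios are 2.5 and 1.5, so take
# all of item 1, then 3 of item 2's 6 kg -> 10 + 3/6 * 9 = 14.5.
ratios = sorted(((p / w, p, w) for p, w in zip([10, 9], [4, 6])), reverse=True)
limit = gain = 0
for _, p, w in ratios:
    take = min(w, 7 - limit)  # whole item if it fits, otherwise the remainder
    gain += take / w * p
    limit += take
    if limit == 7:
        break
print(gain)  # 14.5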
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 0 |
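The metric above delegates to NLTK's corpus_gleu. As the description says, GLEU counts matching 1- to 4-grams and takes the minimum of n-gram precision and recall; NLTK realizes that minimum as matched n-grams divided by the larger of the two n-gram totals. A sentence-level sketch with a made-up pair:

from nltk.translate.gleu_score import sentence_gleu

ref = "the cat sat on the mat".split()
hyp = "the cat sat on mat".split()
# 11 matching n-grams, 14 hypothesis n-grams, 18 reference n-grams:
# score = min(11/14, 11/18) = 11/18
print(round(sentence_gleu([ref], hyp), 2))  # 0.61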
'''simple docstring'''
def __magic_name__ ( A : int, A : Tuple ):
'''simple docstring'''
a = len(lowerCAmelCase_ )
a = []
for i in range(len(lowerCAmelCase_ ) - pat_len + 1 ):
a = True
for j in range(lowerCAmelCase_ ):
if s[i + j] != pattern[j]:
a = False
break
if match_found:
position.append(lowerCAmelCase_ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 707 |
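The scan above is the O(len(s) * len(pattern)) baseline; its output can be cross-checked against repeated str.find, which also reports overlapping matches when advanced by one position:

def find_all(s: str, pat: str) -> list[int]:
    out, i = [], s.find(pat)
    while i != -1:
        out.append(i)
        i = s.find(pat, i + 1)  # step by one so overlapping matches count
    return out

print(find_all("ABAAABCDBBABCDDEBCABC", "ABC"))  # [4, 10, 18]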
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 0 |
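The sort key in the script above is the first quoted identifier of each mapping block, extracted with _re_identifier. A standalone check of the pattern on a typical mapping line:

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
block = '        ("albert", "AlbertConfig"),'
print(_re_identifier.search(block).groups()[0])  # albert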
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __magic_name__ ( A : List[str], A : Any, A : Union[str, Any] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
a = quote(_snake_case )
return hfh.hf_hub_url(_snake_case, _snake_case, repo_type="dataset", revision=_snake_case )
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 0 |
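A round-trip through the tokenizer above (a sketch; it downloads the referenced SentencePiece model on first use, so it assumes network access):

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Crime and Punishment")
print(tok.decode(ids))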
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__lowerCAmelCase : Dict = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCAmelCase : int = 'UperNetConfig'
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int = 0 , __lowerCamelCase : Tuple = False , __lowerCamelCase : Optional[int] = 1 , ) -> None:
super().__init__()
a = nn.Convad(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE , )
a = nn.BatchNormad(_SCREAMING_SNAKE_CASE )
a = nn.ReLU()
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> torch.Tensor:
a = self.conv(_SCREAMING_SNAKE_CASE )
a = self.batch_norm(_SCREAMING_SNAKE_CASE )
a = self.activation(_SCREAMING_SNAKE_CASE )
return output
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] ) -> None:
super().__init__()
a = [
nn.AdaptiveAvgPoolad(_SCREAMING_SNAKE_CASE ),
UperNetConvModule(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] ) -> torch.Tensor:
a = input
for layer in self.layers:
a = layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ) -> None:
super().__init__()
a = pool_scales
a = align_corners
a = in_channels
a = channels
a = []
for i, pool_scale in enumerate(_SCREAMING_SNAKE_CASE ):
a = UperNetPyramidPoolingBlock(pool_scale=_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , channels=_SCREAMING_SNAKE_CASE )
self.blocks.append(_SCREAMING_SNAKE_CASE )
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[torch.Tensor]:
a = []
for ppm in self.blocks:
a = ppm(_SCREAMING_SNAKE_CASE )
a = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(_SCREAMING_SNAKE_CASE )
return ppm_outs
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Dict:
super().__init__()
a = config
a = config.pool_scales # e.g. (1, 2, 3, 6)
a = in_channels
a = config.hidden_size
a = False
a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
a = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
a = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
a = nn.ModuleList()
a = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
a = UperNetConvModule(_SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
a = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_SCREAMING_SNAKE_CASE )
self.fpn_convs.append(_SCREAMING_SNAKE_CASE )
a = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __UpperCAmelCase ( self : str ) -> List[str]:
self.apply(self._init_weights )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict ) -> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ) -> List[str]:
a = inputs[-1]
a = [x]
psp_outs.extend(self.psp_modules(_SCREAMING_SNAKE_CASE ) )
a = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
a = self.bottleneck(_SCREAMING_SNAKE_CASE )
return output
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : str ) -> torch.Tensor:
# build laterals
a = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_SCREAMING_SNAKE_CASE ) )
# build top-down path
a = len(_SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a = laterals[i - 1].shape[2:]
a = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_SCREAMING_SNAKE_CASE , mode="bilinear" , align_corners=self.align_corners )
# build outputs
a = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
a = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
a = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
a = self.fpn_bottleneck(_SCREAMING_SNAKE_CASE )
a = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : str = 2 , __lowerCamelCase : Dict = 3 , __lowerCamelCase : Union[str, Any] = 1 ) -> None:
super().__init__()
a = config
a = config.auxiliary_in_channels
a = config.auxiliary_channels
a = config.auxiliary_num_convs
a = config.auxiliary_concat_input
a = in_index
a = (kernel_size // 2) * dilation
a = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
a = nn.Identity()
else:
a = nn.Sequential(*_SCREAMING_SNAKE_CASE )
if self.concat_input:
a = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __UpperCAmelCase ( self : Tuple ) -> str:
self.apply(self._init_weights )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> torch.Tensor:
# just take the relevant feature maps
a = encoder_hidden_states[self.in_index]
a = self.convs(_SCREAMING_SNAKE_CASE )
if self.concat_input:
a = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
a = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class snake_case__ (lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = UperNetConfig
SCREAMING_SNAKE_CASE_ : Tuple = "pixel_values"
SCREAMING_SNAKE_CASE_ : Tuple = True
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str=False ) -> Optional[int]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a = value
__lowerCAmelCase : List[str] = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCAmelCase : List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
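

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: runs a random image
    # through a public checkpoint (assumes network access and that `torch` is
    # imported at the top of this file, as in the real modeling module).
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    pixel_values = torch.randn(1, 3, 512, 512)  # dummy (batch, channels, height, width)
    with torch.no_grad():
        logits = model(pixel_values=pixel_values).logits
    print(logits.shape)  # (1, num_labels, 512, 512): logits are upsampled to the input size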
| 709 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Deadlock-avoidance scheduler based on the Banker's algorithm."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # column-wise sum of the resources currently allocated to all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # remaining demand of each process: maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()

        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self) -> None:
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
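    # Example run with the module-level test data above; `describe=True` makes
    # main() print the allocation tables before scheduling the processes.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)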
| 662 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing (linear probing) and tombstone deletion.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # linear probing: try the next bucket, wrapping around
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # empty bucket or matching key: store and report success
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item
        )
        return f"""HashMap({val_string})"""
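

if __name__ == "__main__":
    # Small demonstration of the open-addressing behaviour above: inserts grow
    # the table once the load factor (0.75 by default) is exceeded, and deletes
    # leave the `_deleted` tombstone so that probe chains stay intact.
    hash_map = HashMap(initial_block_size=4)
    for i in range(10):
        hash_map[f"key_{i}"] = i  # resizes up once more than 3 items are stored
    del hash_map["key_0"]  # the bucket is replaced by the tombstone, not None
    print(len(hash_map), hash_map["key_9"])  # -> 9 9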
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Converts a list of datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"""
                )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."""
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."""
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
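

if __name__ == "__main__":
    # Quick illustration (not part of the original module) using the Dataset
    # class imported at the top of this file:
    d1 = Dataset.from_dict({"id": [0, 1]})
    d2 = Dataset.from_dict({"id": [2, 3]})
    print(len(concatenate_datasets([d1, d2])))  # 4
    print(interleave_datasets([d1, d2])[0])  # {'id': 0}: rows alternate d1, d2, d1, d2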
| 662 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = '''yolos'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
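

if __name__ == "__main__":
    # Sketch (not part of the original module): a randomly initialised YOLOS
    # model built from this configuration, assuming a transformers install
    # that ships YolosForObjectDetection.
    from transformers import YolosForObjectDetection

    config = YolosConfig(num_detection_tokens=100)
    model = YolosForObjectDetection(config)
    print(model.config.num_detection_tokens)  # 100 learned detection tokens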
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
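

if __name__ == "__main__":
    # Usage sketch (needs network access to the Hub; not part of the original
    # module): the fast tokenizer produces the [CLS] A [SEP] and
    # [CLS] A [SEP] B [SEP] layouts built above.
    tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    pair_ids = tok("first segment", "second segment")["input_ids"]
    print(tok.decode(pair_ids))  # [CLS] first segment[SEP] second segment[SEP]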
| 662 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
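

def load_shards(paths):
    """Companion sketch (not in the original script): read shards written by
    `main` back into a tf.data pipeline. The feature names mirror the
    serialization in `get_serialized_examples`; the shard paths are whatever
    `main` produced, e.g. ["tf-tpu/train/dataset-0-1000.tfrecord"]."""
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }

    def decode(record):
        parsed = tf.io.parse_single_example(record, feature_spec)
        return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

    return tf.data.TFRecordDataset(paths).map(decode)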
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"""Limiting the dataset to {args.limit} entries.""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""", "w") as f:
        print(f"""Total {args.split} records: {total_records}""", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 662 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations", (
pytest.param(_add_items, id="add items" ),
pytest.param(_overwrite_items, id="overwrite items" ),
pytest.param(_delete_items, id="delete items" ),
pytest.param(_access_absent_items, id="access absent items" ),
pytest.param(_add_with_resize_up, id="add with resize up" ),
pytest.param(_add_with_resize_down, id="add with resize down" ),
), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
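
# Follow-up sketch (illustrative): `ratio_char_token`, written by `tokenize`
# above, gives a quick estimate of how many characters one token covers.
print(F'''Mean chars per token: {sum(ds["ratio_char_token"]) / len(ds):.2f}''')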
| 662 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
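
# Example of the flag registered above (illustrative, not part of this
# conftest): with the custom checker installed, a doctest line carrying the
# directive accepts any output, e.g.
#
#     >>> torch.cuda.device_count()  # doctest: +IGNORE_RESULT
#     4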
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
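

def pass_at_k_estimate(n: int, c: int, k: int) -> float:
    """For reference (not used by this script): the unbiased pass@k estimator
    that the `code_eval` metric reports (Chen et al., 2021), for `n` samples
    of which `c` pass the unit tests."""
    import math

    if n - c < k:
        return 1.0
    return 1.0 - math.comb(n - c, k) / math.comb(n, k)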
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Addition of two integers using only bitwise operators."""
    while second != 0:
        # carry: positions where both operands have a set bit, shifted left once
        c = first & second
        first ^= second  # partial sum without the carry
        second = c << 1
    return first
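

# Worked example: add(5, 9)
#   carry = 5 & 9 = 1;  first = 5 ^ 9 = 12;  second = 1 << 1 = 2
#   carry = 12 & 2 = 0; first = 12 ^ 2 = 14; second = 0  ->  returns 14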
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F'''{add(first, second) = }''')
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
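
# What the lazy module buys us (illustrative, not part of this file):
# importing the package stays cheap, and the torch-backed symbols declared in
# `_import_structure` are only materialised on first attribute access, e.g.
#
#     import transformers
#     config = transformers.RoCBertConfig()      # triggers the real submodule import
#     model = transformers.RoCBertModel(config)  # torch is loaded only now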
| 662 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
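

if __name__ == "__main__":
    # Tiny demo (illustrative): flags are parsed with `strtobool` semantics.
    os.environ["MY_DEBUG_FLAG"] = "yes"  # y/yes/t/true/on/1 all map to True
    print(parse_flag_from_env("MY_DEBUG_FLAG"))  # True
    print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # 1 unless one is set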
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class snake_case__ :
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : int=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=99 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : str=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : int="gelu" , __lowerCamelCase : int=0.0 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=5_12 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Dict=2 , __lowerCamelCase : str=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : int=None , ) -> int:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_multiple_size
a = hidden_act
a = hidden_dropout
a = attention_dropout
a = weight_tying
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def __UpperCAmelCase ( self : Any ) -> List[Any]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = self.get_config()
return config, input_ids, input_mask, token_labels
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : int ) -> str:
a , a , a , a = self.prepare_config_and_inputs()
a = True
return config, input_ids, input_mask, token_labels
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ) -> Dict:
a = GPTNeoXJapaneseModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
a = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = True
a = GPTNeoXJapaneseModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> List[str]:
a = True
a = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask to match
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
a = output_from_no_past["hidden_states"][0]
a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ (__lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Dict = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
a = GPTNeoXJapaneseModelTester(self )
a = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a , a , a , a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
a , a , a , a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# This regression test was failing with PyTorch < 1.3
a , a , a , a = self.model_tester.prepare_config_and_inputs_for_decoder()
a = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
a , a , a , a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def __UpperCAmelCase ( self : str ) -> int:
a = "abeja/gpt-neox-japanese-2.7b"
a = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
a = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
a = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
a = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase_ )
a = []
for prompt in prompts:
a = tokenizer(lowerCAmelCase_ , return_tensors="pt" ).input_ids
a = model.generate(lowerCAmelCase_ , max_length=50 )
a = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE_ : int = 10
def __UpperCAmelCase ( self : Tuple , **__lowerCamelCase : List[str] ) -> Tuple:
a = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**UpperCAmelCase__ )
return config
def __UpperCAmelCase ( self : List[str] ) -> int:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a = torch.manual_seed(0 )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma
a = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
a = model(UpperCAmelCase__ , UpperCAmelCase__ )
a = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
a = output.prev_sample
a = torch.sum(torch.abs(UpperCAmelCase__ ) )
a = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config(prediction_type="v_prediction" )
a = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a = torch.manual_seed(0 )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma
a = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
a = model(UpperCAmelCase__ , UpperCAmelCase__ )
a = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
a = output.prev_sample
a = torch.sum(torch.abs(UpperCAmelCase__ ) )
a = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
a = torch.manual_seed(0 )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a = sample.to(UpperCAmelCase__ )
for t in scheduler.timesteps:
a = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
a = model(UpperCAmelCase__ , UpperCAmelCase__ )
a = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
a = output.prev_sample
a = torch.sum(torch.abs(UpperCAmelCase__ ) )
a = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**UpperCAmelCase__ , use_karras_sigmas=UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
a = torch.manual_seed(0 )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a = sample.to(UpperCAmelCase__ )
for t in scheduler.timesteps:
a = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
a = model(UpperCAmelCase__ , UpperCAmelCase__ )
a = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
a = output.prev_sample
a = torch.sum(torch.abs(UpperCAmelCase__ ) )
a = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCAmelCase : int = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCAmelCase : Any = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCAmelCase : Optional[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Tuple="auto" , __lowerCamelCase : Dict=-1 , __lowerCamelCase : Any=0.9 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Tuple=5_00 , __lowerCamelCase : List[str]="gpt2-large" , __lowerCamelCase : str=-1 , __lowerCamelCase : str=10_24 , __lowerCamelCase : Union[str, Any]=25 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=25 , ) -> Any:
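        # Descriptive note (sketch, inferred from the docstring above): the two leading
        # arguments are the prediction and reference texts; the optional ones let callers
        # pass pre-computed features or token ids instead, mirroring the keyword
        # arguments of `compute_mauve` documented in _KWARGS_DESCRIPTION.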
a = compute_mauve(
p_text=snake_case__ , q_text=snake_case__ , p_features=snake_case__ , q_features=snake_case__ , p_tokens=snake_case__ , q_tokens=snake_case__ , num_buckets=snake_case__ , pca_max_data=snake_case__ , kmeans_explained_var=snake_case__ , kmeans_num_redo=snake_case__ , kmeans_max_iter=snake_case__ , featurize_model_name=snake_case__ , device_id=snake_case__ , max_text_length=snake_case__ , divergence_curve_discretization_size=snake_case__ , mauve_scaling_factor=snake_case__ , verbose=snake_case__ , seed=snake_case__ , )
return out
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
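# Minimal usage sketch (assumptions: the positional-embedding helper defined at the
# top of this file is callable as `get_sinusoidal_embeddings`, matching the call
# inside the second module above, and JAX is installed). It embeds four timesteps
# and checks the documented output shape of (num_timesteps, embedding_dim).
if __name__ == "__main__":
    example_timesteps = jnp.arange(4)
    example_embeddings = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
    assert example_embeddings.shape == (4, 32)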
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
__lowerCAmelCase : Optional[int] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__lowerCAmelCase : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
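# Example of the regex above (assumption: Oxford-IIIT-Pet style file names, which is
# what this training script's dataset uses):
#   extract_label("images/great_pyrenees_107.jpg") -> "great_pyrenees"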
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 0 |
import torch
from transformers import AutoModel
class snake_case__ (torch.nn.Module ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
        super().__init__()
a = AutoModel.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
a = torch.nn.CosineSimilarity(3 , 1e-08 )
a = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Dict ) -> Any:
return self.bert(**UpperCAmelCase__ ).last_hidden_state
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] ) -> Optional[Any]:
return token_embeddings.sum(2 , keepdim=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=1 ) -> List[Any]:
return self.softmax(T * self.cos(UpperCAmelCase__ , UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> Optional[Any]:
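        # Forward sketch: the first argument holds the tokenized queries and the second
        # the tokenized supports. Besides the usual BERT inputs, the support dict carries
        # "sizes" (support examples per query), "start_token_id" and "end_token_id",
        # which are read and removed below before the remaining tensors go to BERT.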
a = W_supports['''sizes'''].tolist()
a = W_supports['''start_token_id'''].item()
a = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a = self.BERT(**UpperCAmelCase__ )
a = self.BERT(**UpperCAmelCase__ )
a = None
a = None
a = W_supports['''input_ids'''] == start_token_id
a = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(UpperCAmelCase__ ):
if i == 0:
a = 0
else:
a = support_sizes[i - 1]
a = S[s : s + size][start_token_masks[s : s + size]]
a = S[s : s + size][end_token_masks[s : s + size]]
a = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a = torch.vstack((p_starts, p_start) )
a = torch.vstack((p_ends, p_end) )
else:
a = p_start
a = p_end
return p_starts, p_ends
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCAmelCase : Tuple = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Dict = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__lowerCAmelCase : List[Any] = spec.loader.load_module()
__lowerCAmelCase : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCAmelCase : Optional[Any] = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__lowerCAmelCase : Union[str, Any] = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def __magic_name__ ( ):
'''simple docstring'''
a = []
for config_class in list(CONFIG_MAPPING.values() ):
a = False
# source code of `config_class`
a = inspect.getsource(_lowerCAmelCase )
a = _re_checkpoint.findall(_lowerCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
a , a = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
a = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
a = True
break
a = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
a = "\n".join(sorted(_lowerCAmelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 701 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
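# `_modexpt` is recursive square-and-multiply modular exponentiation:
# for instance, _modexpt(3, 4, 5) == (3**4) % 5 == 1.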
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
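# `solution` iterates modular exponentiation bottom-up to evaluate the power tower
# 1777^(1777^(...^1777)) of height 1855 modulo 10**8, i.e. the last eight digits of
# the hyperexponentiation asked for in Project Euler 188.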
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("foo.json",)] )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]:
a = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
a = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = AutoConfig.from_pretrained("gpt2" )
a = GenerationConfig.from_model_config(__lowerCamelCase )
a = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
a = GenerationConfig()
a = {
'''max_new_tokens''': 10_24,
'''foo''': '''bar''',
}
a = copy.deepcopy(__lowerCamelCase )
a = generation_config.update(**__lowerCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"} )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = GenerationConfig()
a = '''bar'''
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase )
a = GenerationConfig.from_pretrained(__lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
a = GenerationConfig.from_model_config(__lowerCamelCase )
assert not hasattr(__lowerCamelCase , "foo" ) # no new kwargs should be initialized if from config
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCamelCase )
self.assertEqual(default_config.num_beams , 1 )
a = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
a = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __UpperCAmelCase ( cls : int ) -> int:
a = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def __UpperCAmelCase ( cls : str ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity of two strings: the Jaro score boosted by a bonus
    for a shared prefix of up to four characters.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
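
# A quick sanity check (a sketch; the classic pair "martha"/"marhta" shares a
# three-character prefix and differs by one transposition, which puts the
# Jaro-Winkler score at roughly 0.961):
def _demo_jaro_winkler() -> None:
    assert jaro_winkler("hello", "hello") == 1.0
    assert 0.95 < jaro_winkler("martha", "marhta") < 0.97
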
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741

    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens an existing subsequence of one shorter length
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
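
# Illustrative check (assumed example): the longest increasing run of
# [10, 22, 9, 33, 21, 50, 41, 60] is 10, 22, 33, 50, 60, so the length is 5.
def _demo_lis() -> None:
    assert longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]) == 5
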
if __name__ == "__main__":
import doctest
doctest.testmod()
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
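
# Worked check on one known member of the sum (assumed example): the digits of
# 4150 give 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, so 4150 is
# counted by solution().
def _demo_digits_fifth_powers() -> None:
    assert digits_fifth_powers_sum(4150) == 4150
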
if __name__ == "__main__":
print(solution())
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCAmelCase : List[str] = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
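
# Minimal usage sketch (synthetic arrays): three of four predictions match the
# labels, so the accuracy is 0.75.
def _demo_simple_accuracy() -> None:
    assert simple_accuracy(np.array([0, 1, 2, 3]), np.array([0, 1, 2, 0])) == 0.75
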
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
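
# Example invocation (illustrative only -- the task name, model and paths below
# are placeholders rather than values taken from this script):
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./swag_output \
#     --max_seq_length 128 \
#     --do_train --do_eval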
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
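
    # Worked example of the resize rule above (a sketch, assuming the default
    # size={"shortest_edge": 18}): an input with w=50 and h=100 has w < h, so
    # expected_width == 18 and expected_height == int(18 * 100 / 50) == 36.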
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> str:
a = 0
def __UpperCAmelCase ( self : Dict ) -> Any:
a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(A_ , A_ )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = AutoFeatureExtractor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a = AutoFeatureExtractor.from_pretrained(A_ ).to_dict()
config_dict.pop("feature_extractor_type" )
a = WavaVecaFeatureExtractor(**A_ )
# save in new folder
model_config.save_pretrained(A_ )
config.save_pretrained(A_ )
a = AutoFeatureExtractor.from_pretrained(A_ )
# make sure private variable is not incorrectly saved
a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(A_ , A_ )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
a = AutoFeatureExtractor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
with self.assertRaisesRegex(
A_ , "bert-base is not a local folder and is not a valid model identifier" ):
a = AutoFeatureExtractor.from_pretrained("bert-base" )
def __UpperCAmelCase ( self : str ) -> List[str]:
with self.assertRaisesRegex(
A_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a = AutoFeatureExtractor.from_pretrained(A_ , revision="aaaaaa" )
def __UpperCAmelCase ( self : List[str] ) -> Any:
with self.assertRaisesRegex(
A_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def __UpperCAmelCase ( self : int ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A_ ):
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=A_ )
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=A_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(A_ )
a = AutoFeatureExtractor.from_pretrained(A_ , trust_remote_code=A_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
try:
AutoConfig.register("custom" , A_ )
AutoFeatureExtractor.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoFeatureExtractor.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
a = CustomFeatureExtractor.from_pretrained(A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(A_ )
a = AutoFeatureExtractor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
class snake_case__ (_UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = True
try:
AutoConfig.register("custom" , A_ )
AutoFeatureExtractor.register(A_ , A_ )
# If remote code is not set, the default is to use local
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=A_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=A_ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(A_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place with alternating forward and backward bubble passes
    and returns it.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
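
# Quick sanity check (toy input): duplicates and reversed pairs are handled
# like any comparison sort.
def _demo_cocktail_shaker_sort() -> None:
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
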
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
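
# Illustrative rename (hypothetical checkpoint key, traced through the rules
# above): "patch_embed.0.weight" takes the patch_embed branch ("0" becomes
# "convolution1") and then gains the "efficientformer." prefix, ending up as
# "efficientformer.patch_embed.convolution1.weight".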
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
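
# Sanity note on the stage split above (toy numbers, not a shipped config):
# with config.depths[-1] == 8 and config.num_meta3d_blocks == 4, rename_key is
# told the last stage keeps 8 - 4 + 1 == 5 meta4D blocks before the 3D blocks.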
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
__lowerCAmelCase : int = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
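
    # Minimal usage sketch (assumes the `datasets` library can load this
    # metric script):
    #   metric = datasets.load_metric("google_bleu")
    #   score = metric.compute(predictions=[["the", "cat", "sat"]],
    #                          references=[[["the", "cat", "sat"]]])
    #   score["google_bleu"]  # an exact match scores 1.0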
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
super().setUp()
a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
a = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
a = Path(self.tmpdirname )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES["target_spm"] )
a = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : List[Any] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ) -> List[str]:
return (
"This is a test",
"This is a test",
)
def __UpperCAmelCase ( self : Tuple ) -> Any:
a = "</s>"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__UpperCamelCase ) , 9 )
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
a = en_de_tokenizer(["I am a small frog"] , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        a = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(__UpperCamelCase , batch.input_ids[0] )
a = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__UpperCamelCase )
a = [x.name for x in Path(__UpperCamelCase ).glob("*" )]
self.assertIn("source.spm" , __UpperCamelCase )
MarianTokenizer.from_pretrained(__UpperCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = self.get_tokenizer()
a = tok(
["I am a small frog" * 10_00, "I am a small frog"] , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_tokenizer()
a = tok(["I am a tiny frog", "I am a small frog"] , padding=__UpperCamelCase , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
# fmt: off
a = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def __UpperCAmelCase ( self : int ) -> List[Any]:
a = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
a = "Tämä on testi"
a = "This is a test"
a = [76, 7, 20_47, 2]
a = [69, 12, 11, 9_40, 2]
a = tokenizer(__UpperCamelCase ).input_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
a = tokenizer(text_target=__UpperCamelCase ).input_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
a = tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
def __init__( self : Any , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=7_68 , __lowerCamelCase : Any=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : Any=30_72 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Any=1e-5 , __lowerCamelCase : Tuple="group" , __lowerCamelCase : int="gelu" , __lowerCamelCase : Optional[int]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCamelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase : int=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=1_28 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : int=3_20 , __lowerCamelCase : Any=8_00 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=0.05 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=10 , __lowerCamelCase : List[str]=3_20 , __lowerCamelCase : Dict=2 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=1_00 , __lowerCamelCase : Any=2_56 , __lowerCamelCase : List[str]=2_56 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]="mean" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[str]=2_56 , __lowerCamelCase : str=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowerCamelCase : Optional[Any]=(5, 3, 3, 1, 1) , __lowerCamelCase : Union[str, Any]=(1, 2, 3, 1, 1) , __lowerCamelCase : int=5_12 , __lowerCamelCase : int=80 , __lowerCamelCase : str=0 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Tuple=3 , __lowerCamelCase : int=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : List[str] , ) -> List[Any]:
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = conv_bias
a = num_buckets
a = max_bucket_distance
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layerdrop
a = layer_norm_eps
a = initializer_range
a = num_ctc_classes
a = vocab_size
a = do_stable_layer_norm
a = use_weighted_layer_sum
a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
# parameters for pretraining with codevector quantized representations
a = num_codevectors_per_group
a = num_codevector_groups
a = contrastive_logits_temperature
a = num_negatives
a = codevector_dim
a = proj_codevector_dim
a = diversity_loss_weight
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# adapter
a = add_adapter
a = adapter_kernel_size
a = adapter_stride
a = num_adapter_layers
a = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = list(__lowerCamelCase )
a = xvector_output_dim
@property
def __UpperCAmelCase ( self : Any ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
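# Illustrative sketch (not part of the original file): `inputs_to_logits_ratio`
# multiplies the conv strides together, i.e. how many raw waveform samples
# collapse into one encoder frame. With the default strides this is
# 5 * 2**6 = 320, so 16 kHz audio yields 50 frames per second.
_default_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, _default_stride, 1) == 3_20  # 16_000 // 320 == 50 frames/s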
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/reformer-crime-and-punishment': 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
            return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
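# Illustrative sketch (not part of the original file): a minimal round trip
# through the tokenizer defined above. The checkpoint is the one listed in
# PRETRAINED_VOCAB_FILES_MAP; requires network access.
if __name__ == "__main__":
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok.encode("Crime and Punishment")
    print(ids)
    print(tok.decode(ids))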
| 662 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator( self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 5_00,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
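# Illustrative sketch (not part of the original test file): how the
# distribution settings above compose. With 8 MPI processes per host and 4
# model-parallel partitions, smdistributed's ddp mode runs
# processes_per_host * instance_count / partitions data-parallel replicas.
_processes_per_host, _partitions, _instance_count = 8, 4, 1
assert _processes_per_host * _instance_count // _partitions == 2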
| 709 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """simple docstring"""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self) -> None:
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
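    # Illustrative sketch (not part of the original file): the safety check at
    # the heart of `main`, stripped of printing, run on the test tables above.
    def is_safe(claim_vector, allocated, maximum):
        summation = [sum(col) for col in zip(*allocated)]
        work = [c - s for c, s in zip(claim_vector, summation)]
        need = [[m - a for m, a in zip(mrow, arow)] for mrow, arow in zip(maximum, allocated)]
        finished = [False] * len(allocated)
        progress = True
        while progress:
            progress = False
            for i, row in enumerate(need):
                if not finished[i] and all(n <= w for n, w in zip(row, work)):
                    # process i can finish; release its resources back to the pool
                    work = [w + a for w, a in zip(work, allocated[i])]
                    finished[i] = True
                    progress = True
        return all(finished)

    print("Safe state:", is_safe(test_claim_vector, test_allocated_res_table, test_maximum_claim_table))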
| 662 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase : Optional[int] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    vocab_size = 99
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def test_lm_forward( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )

    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self )

    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text , return_tensors="jax" )

        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS )
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."

        generated_txt = tokenizer.batch_decode(generated_utterances.sequences , **TOK_DECODE_KW )
        assert generated_txt[0].strip() == tgt_text
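# Illustrative sketch (not part of the original test file): a minimal NumPy
# reimplementation of what `shift_tokens_right` (exercised above) computes —
# the target sequence rotated right by one, with the decoder start token
# rotated in and any -100 label placeholders normalized to the pad id.
def _shift_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.roll(input_ids, 1, axis=-1)
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

# _shift_right_sketch(np.array([[71, 82, 18, 2]]), 1, 2) -> [[ 2 71 82 18]]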
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis )
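# Illustrative sketch (not part of the original module): typical use of the
# two helpers above from user code, with made-up dataset contents.
if __name__ == "__main__":
    from datasets import Dataset, concatenate_datasets, interleave_datasets

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
    stacked = concatenate_datasets([d1, d2])
    print(mixed["a"], stacked["a"])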
| 662 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 10_12 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
    def test_full_tokenizer( self ):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer( self ):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
    def test_tokenization_hello_world( self ):
        input_text = "Hello World!"
        expected_ids = [3_53_89, 66_72, 49, 2]
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(input_text ) )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = {"input_ids": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
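# Illustrative sketch (not part of the original test file): the
# `fairseq_offset` used in the id assertions above is just an additive shift
# that reserves the lowest ids for fairseq's special tokens. The offset value
# below is made up for demonstration.
def _to_fairseq_ids(sp_ids, offset):
    return [sp_id + offset for sp_id in sp_ids]

assert _to_fairseq_ids([2_85, 46, 10], 12) == [297, 58, 22]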
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
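# Illustrative sketch (not part of the original file): the three methods above
# all encode the same sequence-pair layout,
#   [CLS] A ... [SEP] B ... [SEP]
# with token_type_ids 0 over the first segment and 1 over the second. A tiny
# standalone check of that arithmetic with made-up ids:
_cls_id, _sep_id = 65, 66
_seq_a, _seq_b = [10, 11], [20]
_pair = [_cls_id] + _seq_a + [_sep_id] + _seq_b + [_sep_id]
_type_ids = [0] * (len(_seq_a) + 2) + [1] * (len(_seq_b) + 1)
assert len(_pair) == len(_type_ids) == 6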
| 662 | 0 |
def min_path_sum( grid: list ) -> int:
    '''simple docstring'''
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information" )

    for cell_n in range(1, len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above )
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row( current_row: list, row_above: list ) -> list:
    '''simple docstring'''
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n] )

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
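    # Illustrative sketch (not part of the original file): worked example —
    # moving only right/down, the cheapest path is 1 -> 3 -> 1 -> 1 -> 1 = 7.
    example_grid = [
        [1, 3, 1],
        [1, 5, 1],
        [4, 2, 1],
    ]
    assert min_path_sum(example_grid) == 7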
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
'''simple docstring'''
    parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
return args
def tokenize_function( tokenizer ):
    '''simple docstring'''
    def fn(examples ):
        return tokenizer(examples["text"] )

    return fn
def get_serialized_examples( tokenized_data ):
    '''simple docstring'''
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized_example = example.SerializeToString()
        records.append(serialized_example )
    return records
def main( args ):
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset ), args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir, args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir, args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset ), args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir, f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )

        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename, records_containing ) )

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""", "w" ) as f:
        print(f"""Total {args.split} records: {total_records}""", file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
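    # Illustrative sketch (not part of the original script): reading one shard
    # back to check the round trip. The feature spec mirrors the int64
    # features serialized above; the path below is a placeholder.
    def load_shard(path, max_length=512):
        spec = {
            "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
            "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
        }
        return tf.data.TFRecordDataset(path).map(lambda rec: tf.io.parse_single_example(rec, spec))

    # for example_batch in load_shard("tf-tpu/train/dataset-0-1000.tfrecord").take(1):
    #     print(example_batch["input_ids"].shape)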
| 662 | 0 |
def stooge_sort( arr: list ) -> list:
    '''simple docstring'''
    stooge(arr, 0, len(arr ) - 1 )
    return arr


def stooge( arr: list, i: int, h: int ) -> None:
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t) )


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
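    # Illustrative sketch (not part of the original file): quick self-check —
    # stooge sort is correct but runs in about O(n^2.71), so keep inputs small.
    import random

    sample = [random.randint(0, 99) for _ in range(12)]
    assert stooge_sort(list(sample)) == sorted(sample)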
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
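# Illustrative sketch (not part of the original script): `ratio_char_token` is
# characters per token, so its mean is a cheap proxy for how efficiently the
# tokenizer compresses this corpus.
mean_ratio = sum(ds["ratio_char_token"]) / len(ds)
print(F'''Average characters per token: {mean_ratio:.2f}''')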
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
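# Illustrative sketch (not part of the original module): the lazy-import
# pattern above in miniature, via PEP 562 module __getattr__ — attribute
# access triggers the real import, so importing the package stays cheap.
# Standalone toy only; it is not wired into this module.
def _toy_lazy_getattr(name, _lazy={"sqrt": "math"}):
    import importlib

    if name in _lazy:
        return getattr(importlib.import_module(_lazy[name]), name)
    raise AttributeError(name)


assert _toy_lazy_getattr("sqrt")(9) == 3.0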
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """simple docstring"""

    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """simple docstring"""

    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ), string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens ):
                gen_token_dict[task].append(generated_tokens )

    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    '''simple docstring'''
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True )

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer )] ),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1 )

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader )

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("\n" + test_func + "\n" + entry_point )

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )

        # Save results to json file
        with open(args.output_file, "w" ) as fp:
            json.dump(pass_at_k, fp )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
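# Illustrative sketch (not part of the original script): what
# `remove_last_block` does — generation stops once one of EOF_STRINGS appears,
# and the trailing partial block is cut so only the completed body is scored.
def _demo_remove_last_block():
    completion = "    return x\n\nclass Foo:"
    assert remove_last_block(completion) == "    return x\n"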
| 662 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A SafeLoader that rejects YAML mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
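
# Illustrative check (not part of the original module): YAML with a repeated
# key should be rejected instead of silently keeping the last value.
try:
    yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)
except TypeError as err:
    assert "duplicate" in str(err)
else:
    raise AssertionError("duplicate keys should have been rejected")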
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading YAML metadata block and the remaining Markdown body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
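
# Illustrative check (not part of the original module):
example_yaml, example_body = _split_yaml_from_readme("---\npretty_name: Demo\n---\n# Title")
assert example_yaml == "pretty_name: Demo"
assert example_body == "# Title"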
class DatasetMetadata(dict):
    """Dataset metadata stored as a mapping, with YAML/README (de)serialization helpers."""

    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
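
# Illustrative round-trip (not part of the original module); the metadata keys
# below are hypothetical:
example_metadata = DatasetMetadata.from_yaml_string("pretty_name: Demo\nlicense: mit\n")
assert example_metadata["pretty_name"] == "Demo"
assert "license: mit" in example_metadata.to_yaml_string()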
__lowerCAmelCase : Dict = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 715 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # no fast tokenizer to register for RoCBert

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer to import for RoCBert

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
a = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
a = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
a = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
a = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
a = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
a = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
a = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def __UpperCAmelCase ( self : str ) -> str:
import torch
a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = pipeline("text-classification" )
a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
a = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
a = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = pipeline("text-classification" , framework="tf" )
a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
a = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
a = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ) -> Optional[int]:
a = TextClassificationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ) -> Dict:
a = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
a = "HuggingFace is in"
a = text_classifier(__lowerCamelCase )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
a = ["HuggingFace is in ", "Paris is in France"]
a = text_classifier(__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}, {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
a = text_classifier(__lowerCamelCase , top_k=None )
N = len(model.config.id2label.values() )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [[{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N, [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N] , )
a = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
a = text_classifier(__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
a = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__lowerCamelCase ):
text_classifier(__lowerCamelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
a = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = LongformerTokenizer
test_slow_tokenizer = True
rust_tokenizer_class = LongformerTokenizerFast
test_rust_tokenizer = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            # the tokenizer outputs feed the decoder, so rename them accordingly
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 717 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 662 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__lowerCAmelCase : str = "Create a default config file for Accelerate with only a few flags set."
def __magic_name__ ( A : Any="no", A : str = default_json_config_file, A : bool = False ):
'''simple docstring'''
a = Path(A )
path.parent.mkdir(parents=A, exist_ok=A )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
a = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
a = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
a = torch.cuda.device_count()
a = num_gpus
a = False
if num_gpus > 1:
a = '''MULTI_GPU'''
else:
a = '''NO'''
elif is_xpu_available() and use_xpu:
a = torch.xpu.device_count()
a = num_xpus
a = False
if num_xpus > 1:
a = '''MULTI_XPU'''
else:
a = '''NO'''
elif is_npu_available():
a = torch.npu.device_count()
a = num_npus
a = False
if num_npus > 1:
a = '''MULTI_NPU'''
else:
a = '''NO'''
else:
a = 0
a = True
a = 1
a = '''NO'''
a = ClusterConfig(**A )
config.to_json_file(A )
return path
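
# Illustrative usage (not part of the original CLI module); the path below is
# hypothetical, and the call is commented out because it writes to disk:
# write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default_config.json")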
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 | 0 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan number sequence C(0), C(1), ..., C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
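
# Illustrative check (not part of the original script):
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]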
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
__lowerCAmelCase : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
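
# Illustrative check (not part of the original module): four timesteps embedded
# into eight dimensions give a (4, 8) array.
_example_emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
assert _example_emb.shape == (4, 8)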
class FlaxTimestepEmbedding(nn.Module):
    """Maps sinusoidal time embeddings through a two-layer MLP."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 662 | 0 |
from PIL import Image
def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by adding `level` to every channel value."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
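
# Illustrative check (not part of the original script): brightening a flat
# gray image by 50 shifts every pixel from 100 to 150.
_gray = Image.new("L", (2, 2), color=100)
assert change_brightness(_gray, 50).getpixel((0, 0)) == 150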
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__lowerCAmelCase : Union[str, Any] = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase ):
"""Tests saving/loading and joint text+image processing for ChineseCLIPProcessor."""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix . x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
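
# Illustrative check (not part of the original solution): 2x + y = 5 and
# x + 3y = 10 give x = 1, y = 3.
assert solve([[2, 1], [1, 3]], [[5], [10]]) == [[1.0], [3.0]]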
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a minimal-degree polynomial through (1, y0), (2, y1), ... and return it as a callable."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
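
# Illustrative check (not part of the original solution): fitting the first
# three cubes yields a quadratic that matches them but diverges at x = 4,
# where 58 is the first incorrect term (FIT) for this fit.
_poly = interpolate([1, 8, 27])
assert [_poly(x) for x in (1, 2, 3)] == [1, 8, 27]
assert _poly(4) == 58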
def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials for k = 1..order data points."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Pull the pet-breed label out of a filename like `images/Abyssinian_1.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
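# A typical invocation of this script (a sketch: the file name `cv_example.py`
# is hypothetical, but the flags match the argparse definitions above and
# `accelerate launch` is the standard entry point for accelerate-based scripts):
#
#   accelerate launch cv_example.py --data_dir ./images \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking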
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted (ascending) list for `item`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
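    # A quick self-check (a sketch; assumes the list is sorted ascending,
    # which is the precondition the halving above relies on):
    assert binary_search([1, 3, 5, 7, 9], 7) is True
    assert binary_search([1, 3, 5, 7, 9], 4) is False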
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
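# For example, invoked as `python ./utils/get_modified_files.py utils src`, the
# compiled pattern is r"^(utils|src).*?\.py$": "src/foo/bar.py" matches, while
# "docs/notes.py" and "src/README.md" do not (illustrative paths only).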
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
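# A worked example of the renaming. The substitutions are applied in order, so
# later patterns see the result of earlier ones; the TF key below is
# hypothetical but follows the checkpoint naming scheme the tables above target:
#
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"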
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores these as the transpose of the torch layout
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The shared positional embeddings are duplicated for encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")

    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
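# Typical usage of this conversion script (a sketch: the script file name and
# the checkpoint/output paths below are illustrative only; the flags match the
# argparse definitions above):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus/model.ckpt --save_dir ./bigbird-pegasus-pt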
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation (tetration)
    base^^height, computed by iterating modular exponentiation."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
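    # Sanity check (a quick sketch): for a single modular exponentiation the
    # helper should agree with Python's built-in three-argument pow.
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)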