| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86–54.5k chars) | int64 (0–371) | string (87–49.2k chars) | int64 (0–349) | int64 (0–1) |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
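
# Usage sketch (hypothetical paths and checkpoint; assumes the GLUE MRPC
# .tsv files exist under ./data/mrpc and `transformers` is installed):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./data/mrpc")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
#   print(len(train_dataset), train_dataset[0])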
# --- row 1: end of "code" cell (code_codestyle = 92) ---
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, the function returns -1.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Expand one node of the forward search and one of the backward search.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Once the two frontiers together cannot beat the best known meeting
        # point, no shorter path exists and the search can stop.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
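
# Usage sketch with the example graphs defined above; the shortest E -> F
# route is E -> G -> F with total weight 3:
#
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3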
# --- row 1: end of "style_context" cell (style_context_codestyle = 92, label = 1) ---
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """
    Take an integer decimal value and return its hexadecimal representation
    as a str beginning with "0x".

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
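
# Note: as written, decimal_to_hexadecimal(0) returns "0x" with no digits,
# because the conversion loop never runs for zero; callers that want "0x0"
# must special-case it.
#
#   print(decimal_to_hexadecimal(255))   # 0xff
#   print(decimal_to_hexadecimal(4096))  # 0x1000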
# --- row 2: end of "code" cell (code_codestyle = 164) ---
"""
Project Euler Problem 234: https://projecteuler.net/problem=234

For an integer n >= 4, the lower prime square root lps(n) is the largest
prime <= sqrt(n) and the upper prime square root ups(n) is the smallest
prime >= sqrt(n). n is "semidivisible" if exactly one of lps(n) and ups(n)
divides n. The task is to sum all semidivisible numbers up to the limit.
"""
import math


def prime_sieve(n: int) -> list:
    """
    Sieve of Eratosthenes. Returns the list of primes below n.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Computes the sum of all semidivisible numbers up to the specified limit.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
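
# Quick sanity check of the sieve helper; the full solution() call iterates
# over prime pairs up to sqrt(999966663333) and takes considerably longer:
#
#   print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]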
# --- row 2: end of "style_context" cell (style_context_codestyle = 164, label = 1) ---
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
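
# Usage sketch outside the test harness (downloads a real checkpoint, so it
# needs network access):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   print(type(image_processor).__name__)  # CLIPImageProcessor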
# --- row 3: end of "code" cell (code_codestyle = 46) ---
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.

    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)

        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
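
# Hypothetical invocation (file names and model are placeholders; the flags
# come from the dataclass fields parsed by HfArgumentParser above):
#
#   python run_tf_text_classification.py \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --model_name_or_path bert-base-cased \
#     --output_dir ./model --do_train --do_eval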
# --- row 3: end of "style_context" cell (style_context_codestyle = 249, label = 0) ---
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for a 1-D array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step embedding module: learns embeddings for input time steps.

    Args:
        time_embed_dim (`int`, defaults to `32`): time step embedding dimension
        dtype (`jnp.dtype`, defaults to `jnp.float32`): parameter dtype
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""
    Wrapper module for sinusoidal time step embeddings as described in
    https://arxiv.org/abs/2006.11239.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
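
# Shape check (assumes jax and flax are installed):
#
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
#   print(emb.shape)  # (4, 32)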
# --- row 4: end of "code" cell (code_codestyle = 360) ---
from datetime import datetime as dt
import os

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
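
# Usage sketch (the script name is hypothetical; the token must be allowed
# to read and edit issues on the target repository, matching the
# os.environ["GITHUB_TOKEN"] lookup above):
#
#   GITHUB_TOKEN=<token> python stale.py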
# --- row 4: end of "style_context" cell (style_context_codestyle = 37, label = 0) ---
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.

            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
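
# Hypothetical invocation (paths are placeholders), matching the argparse
# flags defined in main():
#
#   python convert_llama_weights_to_hf.py \
#     --input_dir /path/to/llama/weights --model_size 7B --output_dir ./llama-7b-hf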
# --- row 5: end of "code" cell (code_codestyle = 279) ---
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
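
# Usage sketch outside the test harness (shape assumes the extractor's
# default 30-second padding window):
#
#   import numpy as np
#   from transformers import WhisperFeatureExtractor
#   fe = WhisperFeatureExtractor()
#   feats = fe(np.zeros(16000), sampling_rate=16000, return_tensors="np").input_features
#   print(feats.shape)  # (1, 80, 3000)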
# --- row 5: end of "style_context" cell (style_context_codestyle = 279, label = 1) ---
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] ="time_series_transformer"
a : Union[str, Any] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = [1, 2, 3, 4, 5, 6, 7] , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 64 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = prediction_length
lowerCAmelCase : int = context_length or prediction_length
lowerCAmelCase : Tuple = distribution_output
lowerCAmelCase : Any = loss
lowerCAmelCase : List[Any] = input_size
lowerCAmelCase : Optional[int] = num_time_features
lowerCAmelCase : Union[str, Any] = lags_sequence
lowerCAmelCase : Union[str, Any] = scaling
lowerCAmelCase : Tuple = num_dynamic_real_features
lowerCAmelCase : List[Any] = num_static_real_features
lowerCAmelCase : List[str] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : Optional[Any] = cardinality
else:
lowerCAmelCase : Any = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Optional[int] = input_size * len(__UpperCAmelCase ) + self._number_of_features
lowerCAmelCase : Optional[Any] = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : Optional[int] = decoder_attention_heads
lowerCAmelCase : Any = encoder_ffn_dim
lowerCAmelCase : Any = decoder_ffn_dim
lowerCAmelCase : Any = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : Any = attention_dropout
lowerCAmelCase : Tuple = activation_dropout
lowerCAmelCase : List[str] = encoder_layerdrop
lowerCAmelCase : Tuple = decoder_layerdrop
lowerCAmelCase : List[Any] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[int] = use_cache
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
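
# Usage sketch (context_length falls back to prediction_length, and d_model
# defaults to 64, per the __init__ above):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   print(config.context_length, config.d_model)  # 24 64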
# --- row 6: end of "code" cell (code_codestyle = 354) ---
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = data
lowerCAmelCase : Tuple = None
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.head
while temp is not None:
print(temp.data , end=" " )
lowerCAmelCase : List[str] = temp.next
print()
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = Node(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.head
lowerCAmelCase : Optional[Any] = new_node
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
lowerCAmelCase : str = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCAmelCase : Union[str, Any] = node_a.next
lowerCAmelCase : Any = self.head
while node_a is not None and node_a.data != node_data_a:
lowerCAmelCase : List[Any] = node_a.next
if node_a is None or node_a is None:
return
lowerCAmelCase , lowerCAmelCase : str = node_a.data, node_a.data
if __name__ == "__main__":
lowerCAmelCase__ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
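
# Expected output of the demo above:
#
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5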
# --- row 6: end of "style_context" cell (style_context_codestyle = 133, label = 0) ---
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    text,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results], tokens["""offset_mapping"""] )
    def test_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 116 |
def solution(limit: int = 100_0000) -> int:
    """
    Count the values of n below `limit` for which x^2 - y^2 - z^2 = n has
    exactly ten solutions, where x, y, z form a decreasing arithmetic
    progression a + d, a, a - d. Substituting gives n = a * (4 * d - a), so for
    every divisor a (`first_term`) of n the common difference is
    d = (a + n / a) / 4, which must be a positive integer.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs a > d, and n > 0 needs a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
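    # Hedged usage note (added for illustration, not part of the original file):
    # with the lazy module installed in sys.modules, a caller pays the heavy
    # torch/flax import cost only for the symbols it actually touches, e.g.
    #
    #   from transformers.models.beit import BeitConfig  # resolved on demand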
| 359 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
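
# Hedged usage sketch (added for illustration): the three subclasses reuse the
# T5 TF implementations unchanged, so loading an mT5 checkpoint is a one-liner;
# the public checkpoint name below is an illustrative assumption here.
#
#   model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")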
| 114 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
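

if __name__ == "__main__":
    # Hedged smoke test (added for illustration; all sizes are arbitrary
    # assumptions): build the adaptive softmax over a toy 1000-token vocabulary
    # and run one forward pass without targets, which yields a
    # (seq_len, batch, vocab) tensor of log-probabilities.
    layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)
    dummy_hidden = tf.random.uniform((8, 4, 32))  # (seq_len, batch_size, d_proj)
    logprobs = layer(dummy_hidden, None)
    print(logprobs.shape)  # expected: (8, 4, 1000)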
| 164 |
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
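    # Hedged example (added for illustration): 25 = 0b11001 and 32 = 0b100000,
    # so their bitwise OR is 0b111001.
    print(binary_or(25, 32))  # 0b111001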
| 164 | 1 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
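

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): the replacement import that
    # the warning above recommends. The decorated function body and the
    # starting batch size of 128 are illustrative assumptions, not part of
    # this shim module.
    from accelerate import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size: int) -> None:
        print(f"attempting training with batch_size={batch_size}")

    train()  # the wrapper injects batch_size and reduces it on CUDA OOM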
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # supports dicts, lists of dicts, generators, and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 7 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 76 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    """Computes the per-question macro-F1 and exact match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg"]'
            )
| 37 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 371 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
@slow
@require_bnb
def __A ( self ):
from transformers import AutoModelForCausalLM
_lowerCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map={"""""": 0} , )
_lowerCAmelCase : List[str] = Accelerator()
# This should work
_lowerCAmelCase : List[Any] = accelerator.prepare(a__ )
@slow
@require_bnb
def __A ( self ):
from transformers import AutoModelForCausalLM
_lowerCAmelCase : Any = Accelerator()
with init_empty_weights():
_lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
_lowerCAmelCase : int = infer_auto_device_map(a__ )
_lowerCAmelCase : Optional[Any] = """cpu"""
_lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ )
# This should not work and get value error
with self.assertRaises(a__ ):
_lowerCAmelCase : List[str] = accelerator.prepare(a__ )
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
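# Note on the bitsandbytes cases above (an interpretation, not text from the
# original test file): `Accelerator.prepare` accepts an 8-bit model only when
# the whole model already lives on a single GPU. A quantized model that has
# been dispatched across several devices, or partially offloaded to CPU/disk,
# cannot safely be moved or wrapped again, which is why `prepare` raises a
# ValueError in those cases.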
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
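# Quick usage sketch (illustrative; the sample values are not from the
# original module): find_max searches nums[left:right + 1] recursively.
if __name__ == "__main__":
    sample = [3, 1, 4, 1, 5, 9, 2, 6]
    assert find_max(sample, 0, len(sample) - 1) == 9
    assert find_max(sample, 2, 4) == 5  # max of sample[2:5]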
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
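# A small usage sketch (not part of the original module; the tiny config
# values are illustrative): build a config and inspect the ONNX input spec
# with past-key-values enabled.
if __name__ == "__main__":
    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = CodeGenOnnxConfig(config, use_past=True)
    print(onnx_config.num_layers, onnx_config.num_attention_heads)  # 2 4
    print(list(onnx_config.inputs))  # input_ids, past_key_values.*, attention_mask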
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 259 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
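# Note on the property above (commentary, not original text): the product of
# the convolution strides is the feature encoder's overall downsampling
# factor. With the default strides (5, 2, 2, 2, 2, 2, 2) it evaluates to
# 5 * 2**6 = 320, i.e. one encoder frame per 320 raw audio samples, or 20 ms
# at a 16 kHz sampling rate.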
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
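# Worked example (illustrative; not part of the original module): a symmetric
# silicon p-n junction at 300 K. Concentrations are in cm^-3; only their
# ratio enters the logarithm, so the unit cancels.
if __name__ == "__main__":
    v_bi = builtin_voltage(
        donor_conc=1e17,
        acceptor_conc=1e17,
        intrinsic_conc=1.5e10,  # intrinsic carrier density of silicon at 300 K
    )
    print(f"Built-in voltage: {v_bi:.3f} V")  # about 0.81 V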
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
'''simple docstring'''
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
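# For reference, the relocated helper is a decorator that retries its wrapped
# function with a smaller batch size whenever CUDA runs out of memory. A
# minimal sketch, assuming the current `accelerate` API (the function body
# here is a placeholder):
from accelerate import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train_step(batch_size):
    pass  # build dataloaders and run training at this batch size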
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case__ ( self : List[str] )-> List[Any]:
'''simple docstring'''
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
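# Context sketch (illustrative; not part of the original test file): outside
# of tests, this scheduler is typically swapped into a diffusers pipeline, e.g.
#
#     from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)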
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
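# Note (commentary, not original text): the defaults above reproduce the
# layout of the `unc-nlp/lxmert-base-uncased` checkpoint, i.e. 9 language
# layers, 5 cross-modality layers and 5 visual ("r") layers, so
# `LxmertConfig()` with no arguments matches the released model.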
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the indices and sum of the maximum-sum subarray, by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
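# Usage sketch (illustrative; the example array is the classic textbook one,
# not from the original module): the maximum-sum subarray of nums below is
# [4, -1, 2, 1], spanning indices 3..6 with sum 6.
if __name__ == "__main__":
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    low, high, best = max_subarray(nums, 0, len(nums) - 1)
    print(low, high, best)  # 3 6 6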
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph; supports both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> "GraphAdjacencyList[T]":
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
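# Usage sketch (illustrative): build a small directed triangle. add_edge
# returns self, so calls can be chained.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=True)
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(graph)  # {0: [1], 1: [2], 2: [0]}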
"""simple docstring"""
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
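
    # The ordering tends to be stable across machines even if the absolute numbers
    # are not: slicing wins because the reversed copy and the comparison both run
    # in C, while the index-based loops pay per-character Python overhead and the
    # recursive version additionally builds a new string on every call.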
| 309 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
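
# Runtime behaviour of the pattern above (a sketch, not part of the module): the
# heavy submodules are only imported when first touched, because _LazyModule
# resolves names lazily via __getattr__.
#
#     import transformers.models.layoutlmv2 as layoutlmv2  # cheap, nothing loaded yet
#     config_cls = layoutlmv2.LayoutLMv2Config              # triggers the real import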
| 309 | 1 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a random uint8 image and wraps it in a PIL Image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
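
# End-to-end use outside the test-suite (a sketch; the checkpoint name and the
# `image` variable are assumptions, not exercised by the tests above):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]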
| 259 |
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
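
# Making the raw depth map viewable (a sketch; the bicubic upsampling to the
# input resolution and the uint8 rescaling are conventional post-processing
# choices, not part of the assertions above):
#
#     prediction = torch.nn.functional.interpolate(
#         predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
#     ).squeeze()
#     depth = (prediction - prediction.min()) / (prediction.max() - prediction.min())
#     depth_image = Image.fromarray((depth * 255).cpu().numpy().astype("uint8"))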
| 259 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
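
# Minimal usage (a sketch; the one-second 440 Hz sine stands in for real audio):
#
#     import numpy as np
#     fe = WhisperFeatureExtractor()
#     wave = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
#     feats = fe(wave, sampling_rate=16_000, return_tensors="np")
#     feats["input_features"].shape  # (1, 80, 3000): 80 mel bins x 30 s of 10 ms hops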
| 319 |
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
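
# What `accelerator.accumulate` is doing for you, roughly (a sketch of the
# equivalent manual loop; dividing by the step count keeps the gradient scale
# identical to one large batch):
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / gradient_accumulation_steps
#         loss.backward()
#         if (step + 1) % gradient_accumulation_steps == 0:
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()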
| 319 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 16 |
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2

    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1

    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0  INF  INF
    # INF  0  2
    # INF  1  0
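
    # The same run, driven programmatically instead of via input() (a sketch):
    #
    #     INF = float("inf")
    #     g = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
    #     dist, _ = floyd_warshall(g, 3)
    #     # dist rows match the expected output above; vertex 0 stays unreachable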
| 16 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
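
# Instantiating and tweaking the detection-specific knobs (a sketch):
#
#     config = YolosConfig(num_detection_tokens=50, auxiliary_loss=True)
#     # YOLOS appends `num_detection_tokens` learnable tokens to the ViT patch
#     # sequence; each one becomes a candidate detection at the output.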
| 337 |
'''simple docstring'''
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Evaluates on the train set as well whenever an evaluation is due."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
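
# What --freeze buys (a sketch; exact counts depend on the checkpoint): with the
# encoder frozen, only the classification head keeps requires_grad=True.
#
#     trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
#     total = sum(p.numel() for p in model.parameters())
#     print(f"training {trainable}/{total} parameters")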
| 337 | 1 |
'''simple docstring'''
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
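
# The layer-selection idea behind "alternating" copying (a sketch; the real
# script uses a hand-tuned table per (student, teacher) size -- this stand-in
# only illustrates the spreading):
#
#     def pick_layers_to_copy(n_student: int, n_teacher: int) -> list:
#         step = (n_teacher - 1) / max(n_student - 1, 1)
#         return [round(i * step) for i in range(n_student)]
#
#     pick_layers_to_copy(3, 12)  # -> [0, 6, 11]: first, middle and last layers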
| 309 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
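
# Composing the two configs (a sketch):
#
#     vision = GitVisionConfig(image_size=384)
#     config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
#     config.vision_config.image_size  # 384 -- the dict is re-wrapped in __init__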
| 218 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
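
if __name__ == "__main__":
    # Quick sanity check (added) mirroring the example in _KWARGS_DESCRIPTION above;
    # requires `datasets` and `nltk`, and downloads nltk data on first run.
    meteor = datasets.load_metric("meteor")
    predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    print(round(meteor.compute(predictions=predictions, references=references)["meteor"], 4))  # ~0.6944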
| 356 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
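
# A plausible sketch (added) of the `get_duration` decorator imported from `utils`
# above. The real helper ships with the datasets benchmark suite; this stand-in is
# an assumption, kept as a comment only to make the timing contract explicit:
#
#     import functools
#     import time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             starttime = time.time()
#             func(*args, **kwargs)
#             return time.time() - starttime  # the duration stored in `times`
#         return wrapper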
| 169 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
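
def test_compare_docs_example():
    # Added illustration of the compare_with contract used throughout this file:
    # it returns one of "Win", "Loss", "Tie" from the perspective of the caller.
    # The pair below is the first entry of TEST_COMPARE above.
    assert PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS")) == "Loss"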
| 309 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
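
if __name__ == "__main__":
    # Minimal end-to-end sketch of the processor under test (added for illustration;
    # downloads the real checkpoint, so it is not part of the unit-test run):
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
    inputs = processor(audios=audio, text="the sound of silence", return_tensors="pt")
    print(sorted(inputs.keys()))  # input_features plus the tokenizer outputs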
| 309 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Logs and saves metrics: prints them to screen and writes them to a json file in output_dir.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
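
# Example programmatic launch (added for illustration; argument names follow the
# dataclasses above, while the checkpoint and data dir are placeholders):
#
#     import sys
#     sys.argv = [
#         "finetune_trainer.py",
#         "--model_name_or_path", "sshleifer/tiny-mbart",
#         "--data_dir", "path/to/data",
#         "--output_dir", "output",
#         "--do_train",
#         "--n_train", "8",
#         "--num_train_epochs", "1",
#     ]
#     main()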
| 317 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Counts ordered combinations of elements of `array` (with repetition)
    that sum to `target`, using naive recursion.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, memoized with a dp array so each subproblem is solved once.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Iterative bottom-up variant: dp_array[i] holds the number of combinations summing to i.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
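
    # Worked example (added): ordered compositions of 5 from {1, 2, 5} --
    # c(0)=1, c(1)=1, c(2)=2, c(3)=3, c(4)=5, c(5)=c(4)+c(3)+c(0)=9.
    assert combination_sum_iv(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9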
| 317 | 1 |
from copy import deepcopy


class FenwickTree:
    """
    Fenwick tree (binary indexed tree) supporting point updates and
    prefix-sum queries in O(log n).
    """

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        # O(n) construction: push each value up to its parent node.
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # Reverses init() to recover the underlying array.
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        # Point update: arr[index] += value.
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        # Point assignment: arr[index] = value.
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        # Sum of arr[0:right] (right-exclusive).
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        # Sum over the half-open range [left, right).
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        # Binary lifting over the tree; returns -1 if value < arr[0].
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
    import doctest

    doctest.testmod()
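
    # Quick demonstration (added): prefix sums are right-exclusive and add() is a point update.
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3      # sum over indices 0..2
    assert f.query(1, 4) == 2 + 3 + 4    # half-open range [1, 4)
    f.add(0, 10)
    assert f.prefix(1) == 11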
| 319 |
def remove_digit(num: int) -> int:
    """
    Returns the largest number obtainable by deleting exactly one digit
    from the absolute value of the given integer.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
    __import__("doctest").testmod()
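
    # Worked examples (added): drop one digit to maximize the result.
    assert remove_digit(152) == 52      # removing the leading 1 keeps 52
    assert remove_digit(-1001) == 101   # the sign is discarded via abs()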
| 319 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
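
    # Worked example (added): L = 10 mH with C = 5 uF gives
    # f = 1 / (2 * pi * sqrt(L * C)) ~ 711.76 Hz.
    label, freq = resonant_frequency(inductance=0.01, capacitance=5e-6)
    print(label, round(freq, 2))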
| 229 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Simulates the BB84 quantum key distribution protocol and returns the sifted key.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
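
    # Determinism check (added for illustration; needs qiskit with the Aer simulator):
    # a fixed seed always yields the same sifted key of the requested length.
    key = bb84(key_len=8, seed=0)
    assert len(key) == 8 and set(key) <= {"0", "1"}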
| 229 | 1 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """
    Checks whether n uses each of the digits 1-9 exactly once.
    """
    base_number = str(n)
    return len(base_number) == 9 and set(base_number) == set("123456789")
def solution() -> int | None:
    """
    Finds the largest 1-9 pandigital number expressible as base_num * 100002
    (a two-block concatenated product) or base_num * 1002003 (three blocks).
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
    print(f"{solution() = }")
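
    # Sanity note (added): the maximum is 932718654 = 9327 * 100002, i.e. 9327
    # concatenated with 2 * 9327 = 18654 (Project Euler problem 38).
    assert solution() == 932718654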
| 337 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
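
# Illustration (added): with this lazy __init__, submodules resolve on first
# attribute access, so the package import stays cheap until a symbol is used:
#
#     from transformers.models.ctrl import CTRLConfig
#     config = CTRLConfig()   # only now is configuration_ctrl actually imported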
| 337 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        # Prints the choice at the given index, with the arrow marking the cursor row.
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        # Should not be directly called; moves the cursor up or down.
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        # Jump directly to the row whose digit key was pressed.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        # Start the menu loop and return the index of the selected choice.
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
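
# Interactive usage sketch (added; run in a real terminal, since the menu reads
# raw key presses via the `input` registration machinery above):
#
#     menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
#     index = menu.run(default_choice=0)  # returns the index of the selected row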
| 350 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    # Reset CUDA allocator statistics so each stage's peak memory is measured in isolation.
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 176 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = None
__lowerCamelCase = None
def lowerCAmelCase__ ( ) -> Node | None:
'''simple docstring'''
A__ = Node(1 )
A__ = Node(2 )
A__ = Node(3 )
A__ = Node(4 )
A__ = Node(5 )
return tree
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]:
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]:
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> list[int]:
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> int:
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> Sequence[Node | None]:
'''simple docstring'''
A__ = []
if root is None:
return output
A__ = deque([root] )
while process_queue:
A__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> Sequence[Node | None]:
'''simple docstring'''
A__ = []
def populate_output(SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> Sequence[Node | None]:
'''simple docstring'''
A__ = []
def populate_output(SCREAMING_SNAKE_CASE_: Node | None , SCREAMING_SNAKE_CASE_: int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Node | None ) -> Sequence[Node | None] | list[Any]:
'''simple docstring'''
if root is None:
return []
A__ = []
A__ = 0
A__ = height(SCREAMING_SNAKE_CASE_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
A__ = 1
else:
output.append(get_nodes_from_right_to_left(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
A__ = 0
return output
def lowerCAmelCase__ ( ) -> None: # Main function for testing.
'''simple docstring'''
A__ = make_tree()
print(F'In-order Traversal: {inorder(SCREAMING_SNAKE_CASE_ )}' )
print(F'Pre-order Traversal: {preorder(SCREAMING_SNAKE_CASE_ )}' )
print(F'Post-order Traversal: {postorder(SCREAMING_SNAKE_CASE_ )}' , "\n" )
print(F'Height of Tree: {height(SCREAMING_SNAKE_CASE_ )}' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(SCREAMING_SNAKE_CASE_ ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(SCREAMING_SNAKE_CASE_ ) + 1 ):
print(F'Level {level}:' , get_nodes_from_left_to_right(SCREAMING_SNAKE_CASE_ , level=SCREAMING_SNAKE_CASE_ ) )
print("\nZigZag order Traversal: " )
print(zigzag(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 68 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCAmelCase : List[str] = 1_6
_lowerCAmelCase : List[Any] = 3_2
def lowerCAmelCase ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ = 8
else:
UpperCAmelCase__ = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
UpperCAmelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def lowerCAmelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1":
UpperCAmelCase__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ = config["lr"]
UpperCAmelCase__ = int(config["num_epochs"] )
UpperCAmelCase__ = int(config["seed"] )
UpperCAmelCase__ = int(config["batch_size"] )
set_seed(_lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase__ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
UpperCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase__ = os.path.split(_lowerCAmelCase )[-1].split("." )[0]
accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase__ = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
UpperCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowerCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(_lowerCAmelCase ),
"epoch": epoch,
} , step=_lowerCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase ( ):
"""simple docstring"""
UpperCAmelCase__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=_lowerCAmelCase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
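# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# The training function above caps the per-step batch size at
# MAX_GPU_BATCH_SIZE (16 here) and recovers the requested effective batch
# size through gradient accumulation. The arithmetic is plain integer
# division:
def _demo_split_batch(batch_size, max_gpu_batch_size=16):
    # Returns (gradient_accumulation_steps, per_step_batch_size).
    if batch_size > max_gpu_batch_size:
        return batch_size // max_gpu_batch_size, max_gpu_batch_size
    return 1, batch_size
assert _demo_split_batch(32) == (2, 16)  # two accumulated steps of 16 = effective 32
assert _demo_split_batch(16) == (1, 16)  # fits in one step, no accumulation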
| 169 | 0 |
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = generate_pascal_triangle(UpperCAmelCase )
for row_idx in range(UpperCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
snake_case_ = []
for current_row_idx in range(UpperCAmelCase ):
snake_case_ = populate_current_row(UpperCAmelCase , UpperCAmelCase )
triangle.append(UpperCAmelCase )
return triangle
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
    snake_case_ , snake_case_ = 1, 1
for current_col_idx in range(1 , UpperCAmelCase ):
calculate_current_element(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return current_row
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
snake_case_ = triangle[current_row_idx - 1][current_col_idx - 1]
snake_case_ = triangle[current_row_idx - 1][current_col_idx]
snake_case_ = above_to_left_elt + above_to_right_elt
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
snake_case_ = [[1]]
for row_index in range(1 , UpperCAmelCase ):
snake_case_ = [0] + result[-1] + [0]
snake_case_ = row_index + 1
# Calculate the number of distinct elements in a row
snake_case_ = sum(divmod(UpperCAmelCase , 2 ) )
snake_case_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
snake_case_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
snake_case_ = row_first_half + row_second_half
result.append(UpperCAmelCase )
return result
def UpperCAmelCase ( ) -> Union[str, Any]:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCAmelCase , UpperCAmelCase ) -> None:
snake_case_ = f'{func.__name__}({value})'
snake_case_ = timeit(f'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'{call:38} -- {timing:.4f} seconds' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCAmelCase , UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
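# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# The optimized generator above exploits the palindromic symmetry of Pascal's
# triangle (C(k, i) == C(k, k - i)): only sum(divmod(row_len, 2)) leading
# entries are computed, then the first row_len // 2 of them are mirrored.
# Concretely, for row index 4 (5 entries, 3 of them distinct):
_demo_first_half = [1, 4, 6]                          # C(4,0), C(4,1), C(4,2)
_demo_second_half = _demo_first_half[: 5 // 2][::-1]  # mirror the first 5 // 2 entries
assert _demo_first_half + _demo_second_half == [1, 4, 6, 4, 1]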
| 357 |
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
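# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# The naming scheme above gives each parameter word the shortest prefix not
# already claimed, keeping run names compact yet reversible. A minimal
# standalone version of that prefix search (the real class also handles
# collisions with a numeric fallback, omitted here):
def _demo_shortest_free_prefix(word, taken):
    for prefix_len in range(1, len(word) + 1):
        prefix = word[:prefix_len]
        if prefix not in taken:
            taken.add(prefix)
            return prefix
    raise ValueError(f"no free prefix for {word!r}")
_demo_taken = set()
assert _demo_shortest_free_prefix("learning", _demo_taken) == "l"
assert _demo_shortest_free_prefix("layers", _demo_taken) == "la"   # "l" already taken
assert _demo_shortest_free_prefix("lambda", _demo_taken) == "lam"  # "l" and "la" taken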
| 312 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 317 |
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""image_processor""", """feature_extractor"""]
snake_case_ : List[Any] = """TvltImageProcessor"""
snake_case_ : Dict = """TvltFeatureExtractor"""
def __init__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
super().__init__(image_processor=lowerCAmelCase , feature_extractor=lowerCAmelCase)
_snake_case : List[Any] = image_processor
_snake_case : List[Any] = feature_extractor
def __call__( self : Union[str, Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=False , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Any , ) -> Any:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
_snake_case : Union[str, Any] = None
if images is not None:
_snake_case : Any = self.image_processor(lowerCAmelCase , mask_pixel=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if images_mixed is not None:
_snake_case : Union[str, Any] = self.image_processor(lowerCAmelCase , is_mixed=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if audio is not None:
_snake_case : int = self.feature_extractor(
lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , mask_audio=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Any = {}
if audio is not None:
output_dict.update(lowerCAmelCase)
if images is not None:
output_dict.update(lowerCAmelCase)
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase)
return output_dict
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_snake_case : Optional[Any] = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 317 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 |
"""simple docstring"""
import os
import string
import sys
lowercase__ = 1 << 8
lowercase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
lowercase__ = []
lowercase__ = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowercase__ = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
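# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# Arrow keys above are encoded by adding ARROW_KEY_FLAG (1 << 8 = 256) to the
# escape-sequence letter ('A'..'D' = 65..68), so a single int range
# distinguishes arrow presses from literal characters; decoding subtracts
# the flag again:
_demo_flag = 1 << 8
_demo_up = 65 + _demo_flag                 # how KEYMAP["up"] is built
assert _demo_up != ord("A")                # distinct from a literal 'A' keypress
assert chr(_demo_up - _demo_flag) == "A"   # strip the flag to recover the letter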
| 12 | 1 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _lowercase :
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
__lowerCAmelCase = str(id_ )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = []
__lowerCAmelCase = {} # {vertex:distance}
def __lt__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
return self.key < other.key
def __repr__( self : str ) -> Tuple:
return self.id
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
self.neighbors.append(SCREAMING_SNAKE_CASE__ )
def a ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
__lowerCAmelCase = weight
def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : str ) -> List[str]:
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , snake_case_ )
graph[b - 1].add_edge(graph[a - 1] , snake_case_ )
def UpperCamelCase_ ( snake_case_ : list , snake_case_ : Vertex ) -> list:
'''simple docstring'''
__lowerCAmelCase = []
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = graph[:]
while q:
__lowerCAmelCase = min(snake_case_ )
q.remove(snake_case_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
for i in range(1 , len(snake_case_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCamelCase_ ( snake_case_ : list , snake_case_ : Vertex ) -> Iterator[tuple]:
'''simple docstring'''
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = list(snake_case_ )
hq.heapify(snake_case_ )
while h:
__lowerCAmelCase = hq.heappop(snake_case_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
hq.heapify(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase_ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
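# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# A compact, self-contained Prim's algorithm on a weighted adjacency dict,
# mirroring the heap-based variant above: pop the cheapest frontier edge,
# admit its endpoint into the tree, push that vertex's outgoing edges.
import heapq as _demo_hq
def _demo_prim(adj, start):
    visited, mst = {start}, []
    frontier = list(adj[start])  # (weight, neighbor) pairs
    _demo_hq.heapify(frontier)
    while frontier and len(visited) < len(adj):
        weight, v = _demo_hq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        mst.append((weight, v))
        for edge in adj[v]:
            _demo_hq.heappush(frontier, edge)
    return mst
_demo_graph = {"a": [(1, "b"), (4, "c")], "b": [(1, "a"), (2, "c")], "c": [(4, "a"), (2, "b")]}
assert _demo_prim(_demo_graph, "a") == [(1, "b"), (2, "c")]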
| 229 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_A : int = True
except (ImportError, ModuleNotFoundError):
_A : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCamelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
    snake_case_ = re.sub("""<n>""" , """""" , snake_case_ )  # remove pegasus newline char; keep the result so the substitution actually applies
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case_ ) )
| 229 | 1 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ):
lowercase :Tuple = len(lowerCamelCase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCamelCase ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], lowerCamelCase, lowerCamelCase, )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :list[list[str]] = []
depth_first_search([], [], [], lowerCamelCase, lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(lowerCamelCase )
print("" )
print(len(lowerCamelCase ), "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
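# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# The collision test above rests on the two invariants spelled out in the
# comments: along a "\" (45 degree) diagonal row - col is constant, and along
# a "/" (135 degree) diagonal row + col is constant. A quick check on a 4x4
# board:
_demo_backslash_diag = [(0, 1), (1, 2), (2, 3)]
assert {row - col for row, col in _demo_backslash_diag} == {-1}
_demo_slash_diag = [(0, 3), (1, 2), (2, 1), (3, 0)]
assert {row + col for row, col in _demo_slash_diag} == {3}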
| 158 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 158 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __a ( ) ->List[str]:
a__: Optional[int] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
a__: Union[str, Any] = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert('RGB' )
return image
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: Dict = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Optional[int] = dct.pop(UpperCamelCase_ )
a__: List[Any] = val
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a__: Tuple = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
a__: Dict = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
a__: Any = torch.cat((q_bias, torch.zeros_like(UpperCamelCase_ , requires_grad=UpperCamelCase_ ), v_bias) )
a__: Tuple = qkv_bias
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: Optional[Any] = 364 if 'coco' in model_name else 224
a__: Optional[int] = BlipaVisionConfig(image_size=UpperCamelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
a__: str = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=UpperCamelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
a__: List[Any] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=UpperCamelCase_ ).to_dict()
elif "t5-xl" in model_name:
a__: List[Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a__: Tuple = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
a__: Any = BlipaConfig(vision_config=UpperCamelCase_ , text_config=UpperCamelCase_ )
return config, image_size
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]:
a__: List[Any] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
a__: List[str] = tokenizer('\n' , add_special_tokens=UpperCamelCase_ ).input_ids[0]
a__ , a__: List[Any] = get_blipa_config(UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
a__: List[str] = BlipaForConditionalGeneration(UpperCamelCase_ ).eval()
a__: Optional[int] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
a__ , a__: List[str] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
a__: Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
a__ , a__ , a__: Tuple = load_model_and_preprocess(
name=UpperCamelCase_ , model_type=UpperCamelCase_ , is_eval=UpperCamelCase_ , device=UpperCamelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
a__: Union[str, Any] = original_model.state_dict()
a__: int = create_rename_keys(UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__: List[str] = state_dict.pop(UpperCamelCase_ )
if key.startswith('Qformer.bert' ):
a__: Optional[Any] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
a__: Union[str, Any] = key.replace('self' , 'attention' )
if "opt_proj" in key:
a__: Dict = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
a__: List[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
a__: List[str] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
a__: str = key.replace('t5' , 'language' )
a__: int = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase_ , UpperCamelCase_ )
a__ , a__: Dict = hf_model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
a__: Optional[Any] = load_demo_image()
a__: List[str] = vis_processors['eval'](UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
a__: Optional[Any] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(UpperCamelCase_ )
# create processor
a__: List[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
a__: Optional[Any] = BlipaProcessor(image_processor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
a__: Optional[int] = processor(images=UpperCamelCase_ , return_tensors='pt' ).pixel_values.to(UpperCamelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ )
original_model.to(UpperCamelCase_ )
hf_model.to(UpperCamelCase_ )
with torch.no_grad():
if "opt" in model_name:
a__: Optional[int] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
a__: Dict = hf_model(UpperCamelCase_ , UpperCamelCase_ ).logits
else:
a__: List[str] = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
a__: Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
a__: Dict = hf_model(UpperCamelCase_ , UpperCamelCase_ , labels=UpperCamelCase_ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
a__: str = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=UpperCamelCase_ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a__: Union[str, Any] = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=UpperCamelCase_ )
else:
# cast to same type
a__: str = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase_ ) , UpperCamelCase_ , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
a__: Optional[Any] = ''
a__: str = tokenizer(UpperCamelCase_ , return_tensors='pt' ).input_ids.to(UpperCamelCase_ )
a__: str = original_model.generate({'image': original_pixel_values} )
a__: Optional[int] = hf_model.generate(
UpperCamelCase_ , UpperCamelCase_ , do_sample=UpperCamelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , UpperCamelCase_ )
a__: Union[str, Any] = input_ids.shape[1]
a__: List[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase_ )
a__: List[str] = [text.strip() for text in output_text]
print('HF generation:' , UpperCamelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase_ )
hf_model.save_pretrained(UpperCamelCase_ )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
lowercase__ = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowercase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
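# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# The conversion above is driven by flat (src, dest) rename pairs applied to a
# state dict: pop the tensor under the old key, re-insert it under the new
# one. The same mechanic on plain values:
def _demo_rename_key(state, src, dest):
    state[dest] = state.pop(src)
_demo_state = {"visual_encoder.cls_token": 1, "ln_vision.weight": 2}
_demo_rename_key(_demo_state, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert "visual_encoder.cls_token" not in _demo_state
assert _demo_state["vision_model.embeddings.class_embedding"] == 1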
| 290 |
import os
from math import logaa
def _lowercase ( UpperCamelCase_ = "base_exp.txt" ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCamelCase_ ) , UpperCamelCase_ ) ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = list(map(UpperCamelCase_ , line.split(',' ) ) )
if x * logaa(UpperCamelCase_ ) > largest:
SCREAMING_SNAKE_CASE__ = x * logaa(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = i + 1
return result
if __name__ == "__main__":
print(solution())
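# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# Computing base**exp outright would be wasteful for large exponents, so the
# solution above compares x * log10(base) instead; log10 is monotonic, so
# a1**b1 > a2**b2 exactly when b1*log10(a1) > b2*log10(a2). Sanity checks:
from math import log10 as _demo_log10
assert (3 * _demo_log10(2) > 2 * _demo_log10(3)) == (2**3 > 3**2)        # 8 vs 9
assert (10 * _demo_log10(2) > 3 * _demo_log10(10)) == (2**10 > 10**3)    # 1024 vs 1000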
| 176 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
snake_case : Optional[datasets.Features] = None
def _UpperCamelCase (a__ :"pyspark.sql.DataFrame" , a__ :List[int] , ):
"""simple docstring"""
import pyspark
def generate_fn():
UpperCamelCase__ = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
UpperCamelCase__ = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
UpperCamelCase__ = partition_df.collect()
UpperCamelCase__ = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class __SCREAMING_SNAKE_CASE ( _BaseExamplesIterable ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None , ):
UpperCamelCase__ = df
UpperCamelCase__ = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase__ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.split_shard_indices_by_worker(__lowerCAmelCase , __lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
return len(self.partition_order )
class __SCREAMING_SNAKE_CASE ( datasets.DatasetBuilder ):
snake_case : Any = SparkConfig
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
import pyspark
UpperCamelCase__ = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase__ = df
UpperCamelCase__ = working_dir
super().__init__(
cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , )
def _lowerCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(__lowerCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase )
UpperCamelCase__ = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCAmelCase , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase__ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _lowerCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self , __lowerCAmelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _lowerCamelCase ( self , __lowerCAmelCase ):
import pyspark
def get_arrow_batch_size(__lowerCAmelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
UpperCamelCase__ = self.df.count()
UpperCamelCase__ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase__ = (
self.df.limit(__lowerCAmelCase )
.repartition(1 )
.mapInArrow(__lowerCAmelCase , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase__ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase__ = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) )
UpperCamelCase__ = self.df.repartition(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
import pyspark
UpperCamelCase__ = ParquetWriter if file_format == """parquet""" else ArrowWriter
UpperCamelCase__ = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath
UpperCamelCase__ = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase__ = self.config.features
UpperCamelCase__ = self._writer_batch_size
UpperCamelCase__ = self._fs.storage_options
def write_arrow(__lowerCAmelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase__ = pyspark.TaskContext().taskAttemptId()
UpperCamelCase__ = next(__lowerCAmelCase , __lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
UpperCamelCase__ = 0
UpperCamelCase__ = writer_class(
features=__lowerCAmelCase , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
UpperCamelCase__ = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase__ , UpperCamelCase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
UpperCamelCase__ = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
UpperCamelCase__ = pa.Table.from_batches([batch] )
writer.write_table(__lowerCAmelCase )
if writer._num_bytes > 0:
UpperCamelCase__ , UpperCamelCase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ):
UpperCamelCase__ = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) )
shutil.move(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = (
self.df.mapInArrow(__lowerCAmelCase , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = "arrow" , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
self._validate_cache_dir()
UpperCamelCase__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCAmelCase )
UpperCamelCase__ = not is_remote_filesystem(self._fs )
UpperCamelCase__ = os.path.join if is_local else posixpath.join
UpperCamelCase__ = """-TTTTT-SSSSS-of-NNNNN"""
UpperCamelCase__ = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCamelCase__ = path_join(self._output_dir , __lowerCAmelCase )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = []
UpperCamelCase__ = []
for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCAmelCase )
UpperCamelCase__ = total_num_examples
UpperCamelCase__ = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCamelCase__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
rename(
__lowerCAmelCase , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , )
UpperCamelCase__ = []
UpperCamelCase__ = 0
for i in range(len(__lowerCAmelCase ) ):
UpperCamelCase__ , UpperCamelCase__ = task_id_and_num_shards[i]
for shard_id in range(__lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect()
else:
# don't use any pattern
UpperCamelCase__ = 0
UpperCamelCase__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(__lowerCAmelCase , """""" ) , )
def _lowerCamelCase ( self , __lowerCAmelCase , ):
return SparkExamplesIterable(self.df )
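# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# Shard files above are first written against a TTTTT-SSSSS-of-NNNNN template;
# the task id, per-task shard id, and total shard count are filled in later by
# the same kind of zero-padded string replacement used in _rename_shard:
_demo_fpath = "dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
_demo_named = (
    _demo_fpath.replace("SSSSS", f"{3:05d}").replace("TTTTT", f"{1:05d}").replace("NNNNN", f"{40:05d}")
)
assert _demo_named == "dataset-train-00001-00003-of-00040.arrow"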
| 366 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : int = """cvt"""
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=[7, 3, 3] , __lowerCAmelCase=[4, 2, 2] , __lowerCAmelCase=[2, 1, 1] , __lowerCAmelCase=[64, 192, 384] , __lowerCAmelCase=[1, 3, 6] , __lowerCAmelCase=[1, 2, 10] , __lowerCAmelCase=[4.0, 4.0, 4.0] , __lowerCAmelCase=[0.0, 0.0, 0.0] , __lowerCAmelCase=[0.0, 0.0, 0.0] , __lowerCAmelCase=[0.0, 0.0, 0.1] , __lowerCAmelCase=[True, True, True] , __lowerCAmelCase=[False, False, True] , __lowerCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __lowerCAmelCase=[3, 3, 3] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase )
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = patch_stride
UpperCamelCase__ = patch_padding
UpperCamelCase__ = embed_dim
UpperCamelCase__ = num_heads
UpperCamelCase__ = depth
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = attention_drop_rate
UpperCamelCase__ = drop_rate
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = cls_token
UpperCamelCase__ = qkv_projection_method
UpperCamelCase__ = kernel_qkv
UpperCamelCase__ = padding_kv
UpperCamelCase__ = stride_kv
UpperCamelCase__ = padding_q
UpperCamelCase__ = stride_q
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
| 87 | 0 |
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = 0
while number > 0:
__UpperCamelCase :Optional[Any] = number % 10
sum_of_digits += last_digit
__UpperCamelCase :str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 100 ):
'''simple docstring'''
__UpperCamelCase :Dict = factorial(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = split_and_add(SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
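# --- Illustrative sketch (not part of the original file; _demo* names are ours) ---
# Worked check of the pipeline above: 10! = 3628800 and its digit sum is
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, matching the factorial step followed by the
# digit-splitting step:
import math as _demo_math
assert _demo_math.factorial(10) == 3628800
assert sum(int(d) for d in str(_demo_math.factorial(10))) == 27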
| 43 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    """Shared checks for UNet blocks; subclasses define `block_class` and `block_type`."""

    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
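
# A minimal sketch (added, not part of the original file) of how the mixin is
# consumed: a concrete test case pins `block_class` and `block_type` and
# inherits the shared checks. `DownBlock2D` is assumed importable from diffusers.
from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DSketchTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"  # selects the (4, 32, 16, 16) expected output shape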
| 312 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
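
# A hedged sketch (added, not in the original script) of invoking the converter
# programmatically instead of via argparse; the output directory is hypothetical
# and the model name is the argparse default used below.
def _example_conversion():
    convert_bit_checkpoint(model_name="resnetv2_50x1_bitm", pytorch_dump_folder_path="./bit-converted", push_to_hub=False)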
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 366 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
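
# Illustration (added, not in the original file): `get_pairs` enumerates the
# adjacent symbol pairs that the BPE loop below considers as merge candidates.
assert get_pairs(("h", "u", "g", "s")) == {("h", "u"), ("u", "g"), ("g", "s")}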
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
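
# A hedged usage sketch (added): this is the GPT-2/RoBERTa-style byte-level BPE
# tokenizer; loading by checkpoint name requires network access, and the ids in
# the comment assume the standard RoBERTa vocabulary.
def _example_tokenizer_usage():
    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    return tokenizer("Hello world")["input_ids"]  # expected: [0, 31414, 232, 2]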
| 269 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
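
# Sketch (added, not in the original file): _LazyModule defers the heavy
# torch-backed import until an attribute is first accessed.
def _example_lazy_access():
    from transformers.models import nezha  # cheap; nothing model-related is imported yet

    return nezha.NezhaConfig()  # first access triggers the configuration_nezha import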
| 12 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file')
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
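
# A hedged usage sketch (added): the repo id and the input name `sample` are
# hypothetical; real input names depend on the exported ONNX graph.
def _example_onnx_inference():
    model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
    return model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))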
| 12 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
    """simple docstring"""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length'''
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='''max_length''',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
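
# A hedged call sketch (added, not in the original script): the CSV paths and
# the label column index below are hypothetical.
def _example_get_tfds():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    return get_tfds(train_file="train.csv", eval_file="dev.csv", test_file="test.csv", tokenizer=tokenizer, label_column_id=0, max_seq_length=128)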
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''})
    train_file: str = field(default=None, metadata={'''help''': '''The path of the training file'''})
    dev_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the development file'''})
    test_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the test file'''})
    max_seq_length: int = field(
        default=128,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}
    )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.'''
        )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO,
    )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        F"""16-bits training: {training_args.fp16}"""
    )
    logger.info(F"""Training/evaluation parameters {training_args}""")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')

        with open(output_eval_file, '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key, value in result.items():
                logger.info(F"""  {key} = {value}""")
                writer.write(F"""{key} = {value}\n""")

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """simple docstring"""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """simple docstring"""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'''image''': image, '''question''': question}
        else:
            # assume `image` already contains one or several {"image", "question"} dicts
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """simple docstring"""
        image = load_image(inputs['''image'''])
        model_inputs = self.tokenizer(
            inputs['''question'''], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
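
# A hedged usage sketch (added): `dandelin/vilt-b32-finetuned-vqa` is one public
# VQA checkpoint; the image URL is the COCO cats sample used elsewhere in the repo.
def _example_vqa():
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg", question="How many cats are there?", top_k=2)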
| 7 | 0 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating its integrand."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
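
# Quick numerical check (added): gamma(n) equals (n - 1)! for positive integers,
# e.g. gamma(5) ~ 4! = 24.
assert abs(gamma(5) - 24.0) < 1e-4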
| 158 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List:
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
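
# A hedged usage sketch (added): 'apple/mobilevit-small' is a public MobileViT
# checkpoint; with the defaults above the output is center-cropped to 256x256.
def _example_preprocess():
    from PIL import Image

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    return processor(images=Image.new("RGB", (300, 300)), return_tensors="pt").pixel_values.shape  # (1, 3, 256, 256)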
| 158 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """simple docstring"""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 355 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    """simple docstring"""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
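
# A hedged usage sketch (added): the image path is hypothetical; `setup()`
# downloads the CLIPSeg checkpoint named in `default_checkpoint`.
def _example_segmentation():
    tool = ImageSegmentationTool()
    tool.setup()
    return tool(image=Image.open("cats.jpg"), label="cat")  # binary PIL mask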
| 345 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id)

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"])[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    """Build a LongTensor of token ids on the current test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 80 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int):
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
))
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
))
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
))
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
))
return embed
def attention(idx: int, cnt: int):
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
))
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight'''))
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias'''))
return attention_weights
def cls_token(idx: int):
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token"))
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight"))
head.append(("layernorm.bias", "norm.bias"))
head.append(("classifier.weight", "head.weight"))
head.append(("classifier.bias", "head.bias"))
return head
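
# For illustration: each helper above returns (huggingface_name, original_name)
# pairs. The first pair from `embeddings(0)`, for example, copies the original
# "stage0.patch_embed.proj.weight" tensor into
# "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight".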
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetch the original CvT weights, rename them to the Hugging Face layout and save the result."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size  # assumed resize key for this processor
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 87 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative base-10 integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'''0o{int(octal)}'''
def main() -> None:
    """Print conversions of a few sample values."""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
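
# A quick, minimal cross-check against Python's built-in octal formatting
# (illustrative only): `oct(n)` yields the same "0o..." string for these inputs.
def check_against_builtin(limit: int = 1000) -> None:
    for n in range(1, limit):
        assert decimal_to_octal(n) == oct(n), f'''mismatch at {n}'''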
if __name__ == "__main__":
main()
| 367 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix with the given stride, keeping the max of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix with the given stride, keeping the average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
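
# The same pooling can be written without explicit Python loops using NumPy's
# sliding_window_view (available in NumPy >= 1.20). A minimal sketch, assuming a
# square input just like the loop-based functions above:
def maxpooling_vectorized(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    # every size-by-size window, subsampled by `stride`, reduced with max
    windows = np.lib.stride_tricks.sliding_window_view(arr, (size, size))
    return windows[::stride, ::stride].max(axis=(-2, -1))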
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 272 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
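    # For reference, the basic set operations above reduce to elementwise NumPy
    # calls on the membership arrays (a sketch, not used by the plots below):
    union_np = np.maximum(young, middle_aged)         # max(µA(x), µB(x))
    intersection_np = np.minimum(young, middle_aged)  # min(µA(x), µB(x))
    complement_np = 1 - young                         # 1 - µA(x)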
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 53 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone."""

    model_type = 'maskformer-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
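
# A minimal instantiation sketch (defaults shown in __init__ above): with
# embed_dim=96 and four stages, the derived channel dimension is 96 * 2**3 = 768.
# config = MaskFormerSwinConfig()
# assert config.hidden_size == 768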
| 269 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = '''optuna'''

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = '''ray'''
    pip_package = '''\'ray[tune]\''''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = '''sigopt'''

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = '''wandb'''

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend():
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
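
# Usage sketch (hypothetical caller): pick whichever backend is installed and
# verify it before launching a search.
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]
#   backend_cls().ensure_available()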
| 22 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
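
# Example (illustrative): guard an optional dependency at call time with
#   dep_version_check("tqdm")
# which raises if the installed version violates the pin recorded in `deps`.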
| 22 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length'''
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='''max_length''',
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # dtype note: 32-bit token ids and 64-bit labels are assumed below
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
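
# Each generator above yields one (features, label) pair, e.g.
#   ({"input_ids": [...], "attention_mask": [...]}, 1)
# which `tf.data.Dataset.from_generator` wraps with the declared dtypes/shapes.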
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overcome.'''
        )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        level=logging.INFO,
    )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='''text-classification''',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('''.bin''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        with open(output_eval_file, '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key, value in result.items():
                logger.info(f''' {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')
        results.update(result)

    return results
if __name__ == "__main__":
main()
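
# Example invocation (paths and model name are placeholders):
# python run_tf_text_classification.py \
#   --train_file train.csv --dev_file dev.csv --test_file test.csv \
#   --label_column_id 0 --model_name_or_path bert-base-cased \
#   --output_dir ./out --do_train --do_eval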
| 8 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf'),
            'variance_type': None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # save/load round-tripping is already exercised in check_over_configs above
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type='dpmsolver++',
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float('inf'))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='learned_range')
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 7 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
| 351 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
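

# Quick illustration of the two pickers above (values read straight off the
# hardcoded tables; these examples are added and are not part of the script):
#   pick_layers_to_copy(n_student=3, n_teacher=12)     -> [0, 6, 11]   keeps first & last teacher layers
#   get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11]   supervises later teacher layers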
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d})
    except AttributeError:  # T5
        if hasattr(teacher.config, 'num_encoder_layers'):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, 'num_encoder_layers'):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d})
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, 'prophetnet'
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
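

# Example invocation (an added sketch; the script filename and model name are
# illustrative, not taken from this file):
#   python make_student.py facebook/bart-large-cnn student_dir --e 6 --d 3
# or, from Python:
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_dir", e=6, d=3
#   )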
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
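

# Added usage sketch (not part of the original file; the processor checkpoint
# name is illustrative): building dummy inputs for an ONNX export trace.
#
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=-1, seq_length=-1)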
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
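

# The consumer pattern exercised above, distilled into a stand-alone sketch
# (added for illustration; model/tokenizer construction omitted):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:      # blocks until generate() pushes new decoded text
#       print(new_text, end="")
#   thread.join()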
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
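

# Tiny worked example (added for illustration): dilating a single foreground
# pixel with the cross-shaped structuring element below grows it into a plus.
#
#   img    = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#   dilation(img, kernel)
#   -> [[0, 1, 0],
#       [1, 1, 1],
#       [0, 1, 0]]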
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
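

# Added usage note (illustrative): the config behaves like any PretrainedConfig
# and can be customised and round-tripped to disk, e.g.
#   cfg = RobertaPreLayerNormConfig(num_hidden_layers=6)
#   cfg.save_pretrained("./tmp-config")
#   cfg = RobertaPreLayerNormConfig.from_pretrained("./tmp-config")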
def combination_util(arr, n, r, index, data, i):
    # A combination of size r is ready: print it
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
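
# Added note: for arr = [10, 20, 30, 40, 50] and r = 3 this prints the
# C(5, 3) = 10 combinations in lexicographic order of indices, beginning:
#   10 20 30
#   10 20 40
#   10 20 50
#   10 30 40
#   ...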
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
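
# Added note on the lazy-import pattern above: nothing under `modeling_nezha`
# is imported until an attribute is first accessed, so for example
#   from transformers.models.nezha import NezhaConfig   # cheap, config only
#   from transformers.models.nezha import NezhaModel    # triggers the torch-backed import
# and environments without torch can still import the package itself.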
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Any = 0
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase : List[Any] = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase : Dict = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
            self.assertIsInstance(__a , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(__a ) , 0 )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Dict = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = AutoConfig.from_pretrained(__a )
self.assertIsInstance(__a , __a )
# Check that tokenizer_type ≠ model_type
__lowercase : List[str] = AutoTokenizer.from_pretrained(__a , config=__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" , use_fast=__a )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
__lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" , use_fast=__a )
self.assertIsInstance(__a , __a )
@require_tokenizers
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
__lowercase : Any = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
__lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with pytest.raises(__a ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase : Any = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
if isinstance(__a , __a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __a )
else:
self.assertEqual(tokenizer.do_lower_case , __a )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowercase : Optional[Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = TOKENIZER_MAPPING.values()
__lowercase : List[str] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__a )
@require_tokenizers
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__a ) , __a )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __a )
@require_tokenizers
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__a )
__lowercase : Optional[int] = """Hello, world. How are you?"""
__lowercase : List[Any] = tokenizer.tokenize(__a )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowercase : Tuple = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__a )
__lowercase : str = tokenizer.tokenize(__a )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__a ) , __a )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Any = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = get_tokenizer_config("""bert-base-cased""" )
__lowercase : List[str] = config.pop("""_commit_hash""" , __a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__a , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase : str = get_tokenizer_config(__a )
self.assertDictEqual(__a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase : str = AutoTokenizer.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : List[Any] = get_tokenizer_config(__a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
__lowercase : List[Any] = CustomTokenizer.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __a )
# Can register in two steps
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__a , slow_tokenizer_class=__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : int = BertTokenizerFast.from_pretrained(__a )
bert_tokenizer.save_pretrained(__a )
__lowercase : str = CustomTokenizerFast.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : List[str] = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__lowercase : int = AutoTokenizer.from_pretrained(__a , use_fast=__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
with self.assertRaises(__a ):
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
__lowercase : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a , use_fast=__a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Tuple = False
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = NewTokenizer
_A : Union[str, Any] = False
try:
AutoConfig.register("""custom""" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
# If remote code is not set, the default is to use local
__lowercase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowercase : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowercase : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowercase : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
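

# The registration dance the tests above exercise, distilled (added sketch;
# CustomConfig/CustomTokenizer stand in for user-defined classes):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer,
#                          fast_tokenizer_class=CustomTokenizerFast)
#   tokenizer = AutoTokenizer.from_pretrained(path_to_custom_checkpoint)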
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    # Replace the last `occurrence` occurrences of `old` with `new`
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
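
# Example invocation (added sketch; the script name and checkpoint URL are
# illustrative -- OpenAI published the DALL-E encoder weights as a single file):
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook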
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
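

# Added sketch of how a concrete block test plugs into the mixin above (class
# and block names are illustrative, following the diffusers test convention):
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"
#
#       def test_output(self):
#           expected_slice = [...]   # nine reference values for the output corner
#           super().test_output(expected_slice)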
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE :Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ):
_UpperCAmelCase = d_model
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length
_UpperCAmelCase = cardinality
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = embedding_dimension
_UpperCAmelCase = is_training
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = context_length
_UpperCAmelCase = prediction_length + label_length
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
def lowercase ( self : Union[str, Any] ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase ( self : int , snake_case_ : Optional[Any] ):
_UpperCAmelCase = config.context_length + max(config.lags_sequence )
_UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
_UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_length = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check that attention outputs are always last and that their ordering is stable
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    # Downloads a small cached batch of the tourism-monthly dataset from the Hub.
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 22 | 1 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    if current_sum == needed_sum:
        # The sum of the powers equals needed_sum, so we have found a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # Including current_number**power does not overshoot, so explore that branch.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # Also explore the branch that skips current_number entirely.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return only the solutions count
if __name__ == "__main__":
    import doctest

    doctest.testmod()
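    # Hypothetical usage sketch (not part of the original module; `solve` is the
    # reconstructed wrapper above): count the ways to write 100 as a sum of
    # powers of distinct natural numbers with exponent 2.
    print(solve(100, 2))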
| 181 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""")

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
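# Illustrative quick check (not part of the original module): rename_key()
# rewrites indexed PyTorch module names into Flax-style underscore names.
if __name__ == "__main__":
    print(rename_key("encoder.layers.0.attention.weight"))  # encoder.layers_0.attention.weight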
| 181 | 1 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    from doctest import testmod

    testmod()
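    # Hypothetical round-trip demo (not part of the original module):
    print(encode("hello world"))  # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    print(decode(encode("hello world")))  # hello world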
| 19 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    # Map each lowercase letter to its position in the alphabet (a -> 1).
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Map alphabet positions back to lowercase letters.
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded:", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
    main()
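    # Non-interactive sketch (not part of the original module):
    # encode("hello") -> [8, 5, 12, 12, 15] and decode([8, 5, 12, 12, 15]) -> "hello"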
| 113 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convbert'] = [
        'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvBertForMaskedLM',
        'ConvBertForMultipleChoice',
        'ConvBertForQuestionAnswering',
        'ConvBertForSequenceClassification',
        'ConvBertForTokenClassification',
        'ConvBertLayer',
        'ConvBertModel',
        'ConvBertPreTrainedModel',
        'load_tf_weights_in_convbert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convbert'] = [
        'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFConvBertForMaskedLM',
        'TFConvBertForMultipleChoice',
        'TFConvBertForQuestionAnswering',
        'TFConvBertForSequenceClassification',
        'TFConvBertForTokenClassification',
        'TFConvBertLayer',
        'TFConvBertModel',
        'TFConvBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
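# Usage note (illustrative, not part of the original file): with the lazy module
# installed in sys.modules, `from transformers.models.convbert import ConvBertModel`
# only imports the heavy torch-backed submodule on first attribute access.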
| 354 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    # Exactly one of the three concentrations must be missing (given as 0);
    # the mass-action law n * p = n_i**2 determines the missing value.
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
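    # Hypothetical worked example (not part of the original module): with
    # electron_conc = 25 and intrinsic_conc = 100, the mass-action law gives
    # hole_conc = intrinsic_conc**2 / electron_conc = 400.
    print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=100))  # ('hole_conc', 400.0)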
| 111 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
lowercase : Any = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , _UpperCAmelCase ) )
lowercase : Any = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase : str = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase : int = toks + toks
# toks_str = [t[1] for t in toks]
lowercase : int = [t[0] for t in toks]
# Ensure consistency
lowercase : Union[str, Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase : str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase : Optional[Any] = ' ' + output_txt
lowercase : Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def __lowerCamelCase ( self ):
lowercase : Any = self.perceiver_tokenizer
lowercase : Optional[int] = 'Unicode €.'
lowercase : int = tokenizer(_UpperCAmelCase )
lowercase : Any = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , _UpperCAmelCase )
# decoding
lowercase : Union[str, Any] = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '''[CLS]Unicode €.[SEP]''' )
lowercase : List[str] = tokenizer('''e è é ê ë''' )
lowercase : Optional[Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , _UpperCAmelCase )
# decoding
lowercase : Tuple = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = self.perceiver_tokenizer
lowercase : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowercase : Tuple = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowercase : Optional[Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase : Optional[int] = list(batch.input_ids.numpy()[0] )
else:
lowercase : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.perceiver_tokenizer
lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowercase : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _UpperCAmelCase )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertNotIn('''decoder_input_ids''' , _UpperCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , _UpperCAmelCase )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.perceiver_tokenizer
lowercase : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
lowercase : List[Any] = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Dict = tempfile.mkdtemp()
lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
lowercase : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase : List[str] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase : Tuple = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Union[str, Any] = tempfile.mkdtemp()
lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowercase : str = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowercase : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase : List[str] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase : Any = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase : Tuple = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : Union[str, Any] = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : Dict = json.load(_UpperCAmelCase )
lowercase : List[str] = [f"""<extra_id_{i}>""" for i in range(125 )]
lowercase : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowercase : Union[str, Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase : int = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase : List[str] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_UpperCAmelCase )]
lowercase : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __lowerCamelCase ( self ):
lowercase : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase : List[str] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowercase : Union[str, Any] = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 337 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 190 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 175 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
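    # Hypothetical spot check (not part of the original module), edge = 5:
    print(dodecahedron_surface_area(5))  # ~516.14, scales with edge**2
    print(dodecahedron_volume(5))  # ~957.89, scales with edge**3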
| 175 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255  # hashed to fit the OS filename limit
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
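# Minimal usage sketch (illustrative, not part of the original tests): FileLock
# guards a critical section against other processes locking the same path.
#
#     lock = FileLock("my_resource.lock")
#     with lock.acquire(timeout=5):
#         ...  # exclusive access here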
| 109 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1] and scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for a resized view, assuming the aspect ratio is unchanged."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # preserve the batch shape (required dataclass field)
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build 20 cameras panning in a circle around the origin, looking inward."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
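if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original module): 20 cameras
    # panning around the origin at a 64x64 resolution.
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3]) == (1, 20 * 64 * 64, 2, 3)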
| 306 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    # Score 0-12: how many of the message's 6 most and 6 least frequent letters
    # match English's most and least frequent letters.
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
    import doctest

    doctest.testmod()
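    # Hypothetical demo (not part of the original module): natural English text
    # should score near the maximum of 12.
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))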
| 251 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    # Sum all numbers in [1000, 1000000) that equal the sum of the fifth
    # powers of their digits.
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
    print(solution())
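    # The published answer to Project Euler problem 30 (fifth powers) is 443839,
    # which this brute-force scan reproduces.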
| 251 | 1 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type='''dataset''' ), '''r''' ) as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key) )
    metadata['''thing_ids'''] = thing_ids
    metadata['''class_names'''] = class_names
    return metadata
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Tuple , _A : List[str] , _A : List[str]=7 , _A : str=3 , _A : Union[str, Any]=30 , _A : int=400 , _A : Optional[int]=None , _A : Dict=True , _A : Optional[int]=True , _A : List[Any]=[0.5, 0.5, 0.5] , _A : List[str]=[0.5, 0.5, 0.5] , _A : int=10 , _A : Any=False , _A : List[Any]=255 , _A : str="shi-labs/oneformer_demo" , _A : Dict="ade20k_panoptic.json" , _A : List[str]=10 , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : Optional[int] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : List[Any] = {'''shortest_edge''': 32, '''longest_edge''': 1_333} if size is None else size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : Any = image_mean
UpperCAmelCase__ : Union[str, Any] = image_std
UpperCAmelCase__ : int = class_info_file
UpperCAmelCase__ : str = prepare_metadata(_A , _A )
UpperCAmelCase__ : List[str] = num_text
UpperCAmelCase__ : Optional[int] = repo_path
# for the post_process_functions
UpperCAmelCase__ : Optional[Any] = 2
UpperCAmelCase__ : Dict = 10
UpperCAmelCase__ : Optional[int] = 10
UpperCAmelCase__ : List[Any] = 3
UpperCAmelCase__ : Union[str, Any] = 4
UpperCAmelCase__ : List[str] = num_labels
UpperCAmelCase__ : Any = do_reduce_labels
UpperCAmelCase__ : Optional[Any] = ignore_index
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase_ ( self : Any , _A : str , _A : int=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase__ : Union[str, Any] = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase__ : Any = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase__ : Dict = self.size['''shortest_edge''']
UpperCAmelCase__ : str = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase__ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase__ : Union[str, Any] = self.size['''shortest_edge''']
else:
UpperCAmelCase__ : Tuple = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Optional[int] = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase__ : int = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCAmelCase__ = image_processing_class
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = OneFormerImageProcessorTester(self )
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Dict = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : str = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : str , _A : Optional[int]=False , _A : Optional[Any]=False , _A : Any="np" ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ : int = self.image_processing_tester.num_labels
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase__ : Tuple = num_labels
if is_instance_map:
UpperCAmelCase__ : int = list(range(_A ) ) * 2
UpperCAmelCase__ : Any = dict(enumerate(_A ) )
UpperCAmelCase__ : Optional[int] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ : Tuple = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase__ : Any = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : str ):
'''simple docstring'''
def common(_A : Dict=False , _A : Tuple=None ):
UpperCAmelCase__ : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase__ : Tuple = inputs['''mask_labels''']
UpperCAmelCase__ : str = inputs['''class_labels''']
UpperCAmelCase__ : Dict = inputs['''pixel_values''']
UpperCAmelCase__ : Optional[Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = np.zeros((20, 50) )
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : Tuple = 1
UpperCAmelCase__ : Any = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : List[Any] = fature_extractor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase__ : int = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase__ : Tuple = fature_extractor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ : Dict = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 181 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 181 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Perceiver model."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
'''simple docstring'''
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size = -1,
        seq_length = -1,
        num_choices = -1,
        is_pair = False,
        framework = None,
        num_channels = 3,
        image_width = 40,
        image_height = 40,
    ):
        """simple docstring"""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects the key "inputs" rather than the tokenizer's "input_ids"
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            # Perceiver expects the key "inputs" rather than the feature extractor's "pixel_values"
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
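# A minimal usage sketch (hedged: the tokenizer class and checkpoint name below are
# assumptions, not defined in this file). The ONNX config builds export-ready dummy
# inputs keyed "inputs"/"attention_mask":
#
#   from transformers import PerceiverTokenizer
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   assert set(dummy) == {"inputs", "attention_mask"}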
| 315 |
import qiskit
def half_adder(bit0: int, bit1: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
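# Worked example (assuming an ideal, noiseless simulator): classical bit 0 holds the
# XOR (sum) and classical bit 1 holds the AND (carry), and qiskit prints the higher
# classical bit first, so half_adder(1, 1) should give {"10": 1000}: sum = 0, carry = 1.
# The full truth table, read as "carry sum":
#   (0, 0) -> "00"    (0, 1) -> "01"    (1, 0) -> "01"    (1, 1) -> "10"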
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
| 315 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
SCREAMING_SNAKE_CASE : int = f"""{src_lang}-{tgt_lang}"""
SCREAMING_SNAKE_CASE : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"""Generating {path}""")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 182 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 111 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/rembert""": 2_5_6,
}
SCREAMING_SNAKE_CASE_ = """▁"""
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
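# A small usage sketch (assumes the "google/rembert" checkpoint is downloadable;
# the token ids below are made up):
#
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#   # -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
#   tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#   # -> [0, 0, 0, 0, 1, 1, 1]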
| 193 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: at each index, take the better of skipping the
    current item or (if it fits) taking it.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
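# The plain recursion above re-solves the same (max_weight, index) states and is
# O(2^n) in the worst case. A minimal memoized sketch (an addition, not part of the
# original snippet) caches on the only state that changes:
from functools import lru_cache


def knapsack_memoized(weights: list, values: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def solve(remaining: int, index: int) -> int:
        if index == len(weights):
            return 0
        best = solve(remaining, index + 1)  # skip this item
        if weights[index] <= remaining:  # take it if it fits
            best = max(best, values[index] + solve(remaining - weights[index], index + 1))
        return best

    return solve(max_weight, 0)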
if __name__ == "__main__":
import doctest
doctest.testmod()
| 193 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else F"{key} backend"
            errors.append(F"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers',
        os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(F"- {module}" for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F"{list_of_modules}\n"
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 175 |
from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
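# Quick shape check (a sketch; the sizes are arbitrary):
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   head(torch.randn(2, 768)).shape   # -> torch.Size([2, 5])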
| 175 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
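# Quick sanity checks for the helpers above (values are arbitrary):
#
#   clamp(5, 0, 3)                                       # -> 3
#   clamp_rect((-4, -4, 300, 300), [0, 0], [256, 256])   # -> (0, 0, 256, 256)
#   next_divisible(130, 32)                              # -> 128 (largest multiple of 32 <= 130)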
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 3_50):
        '''simple docstring'''
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        '''simple docstring'''
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=1_28, tile_border=32, original_image_slice=32):
        '''simple docstring'''
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(F"""progress: {obj['progress']:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 322 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        '''simple docstring'''
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
    # Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
| 322 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    prefix = 'backbone.' if is_semantic else ''

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"{prefix}cls_token", 'beit.embeddings.cls_token'),
(f"{prefix}patch_embed.proj.weight", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"{prefix}patch_embed.proj.bias", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"{prefix}pos_embed", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
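# For example, with default flags the returned list contains pairs such as:
#   ('blocks.0.norm1.weight', 'beit.encoder.layer.0.layernorm_before.weight')
#   ('cls_token',             'beit.embeddings.cls_token')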
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
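# A minimal usage sketch for a converted classification (rvlcdip) dump. The
# folder path is a placeholder and the helper name is illustrative, not part of
# the original conversion script.
def _example_reload_converted_model(dump_folder="./dit-dump"):
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained(dump_folder)
    model = BeitForImageClassification.from_pretrained(dump_folder)
    inputs = processor(images=prepare_img(), return_tensors="pt")
    # pick the most likely document class for the test image
    predicted_class = model(**inputs).logits.argmax(-1).item()
    return model.config.id2label[predicted_class]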
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that already match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails to produce two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child_list) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pair the given parent with random mates and return the mutated offspring."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the population contains a perfect match for the target."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
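# A quick demo of the loop above on a tiny target; the values are illustrative.
# With a 27-character alphabet and a 5-character target, convergence typically
# takes only a handful of generations.
def _demo_tiny_evolution():
    tiny_target = "HELLO"
    tiny_genes = list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    gen, pop, best = basic(tiny_target, tiny_genes, debug=False)
    print(f"converged in {gen} generations after scoring {pop} candidates: {best}")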
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # the stats file stores the mean and scale used to normalize spectrograms
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
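# A minimal usage sketch for a converted vocoder: it maps a log-mel spectrogram
# of shape (sequence_length, config.model_in_dim) to a mono waveform. The dump
# path and the random spectrogram are placeholders.
def _example_vocode(dump_folder="./hifigan-dump"):
    vocoder = SpeechT5HifiGan.from_pretrained(dump_folder)
    spectrogram = torch.randn(100, vocoder.config.model_in_dim)  # dummy input
    with torch.no_grad():
        waveform = vocoder(spectrogram)
    return waveform  # 1-D tensor of audio samples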
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
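# A small sanity check on the schedule above: the cosine transform keeps early
# betas tiny while max_beta caps the late ones. The helper name is illustrative.
def _peek_cosine_schedule(num_steps=1000):
    betas = betas_for_alpha_bar(num_steps)
    # first beta is near zero; the last one approaches the max_beta cap
    return betas[0].item(), betas[num_steps // 2].item(), betas[-1].item()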
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """KDPM2 discrete scheduler: a port of the DPM-Solver-2-style sampler from k-diffusion."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
@property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
@property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : List[Any]):
return self.config.num_train_timesteps
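# A hedged usage sketch: the class above matches diffusers' KDPM2DiscreteScheduler,
# and a denoising loop drives it like any other discrete scheduler. `unet` stands
# in for a real noise-prediction model; the helper name is illustrative.
def _example_denoise_loop(unet, sample, num_inference_steps=25):
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample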
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
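# The try/except blocks above implement "optional dependency" imports: if an
# extra is missing, dummy objects are exported instead and only raise once they
# are actually used. A minimal standalone version of the same idea (names are
# illustrative, not part of this package):
def _optional_import(module_name):
    import importlib

    try:
        return importlib.import_module(module_name)
    except ImportError:

        class _Missing:
            def __getattr__(self, name):
                raise ImportError(f"`{module_name}` is required for `{name}`; install it first.")

        return _Missing()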
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCAmelCase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCAmelCase : float = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : float = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case )
elif model_args.model_name_or_path:
_A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
_A = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
_A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
_A = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_A = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_A = ViTMAEForPreTraining(_snake_case )
if training_args.do_train:
_A = ds['train'].column_names
else:
_A = ds['validation'].column_names
if data_args.image_column_name is not None:
_A = data_args.image_column_name
elif "image" in column_names:
_A = 'image'
elif "img" in column_names:
_A = 'img'
else:
_A = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_A = image_processor.size['shortest_edge']
else:
_A = (image_processor.size['height'], image_processor.size['width'])
_A = Compose(
[
Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_snake_case : List[Any] ):
_A = [transforms(_snake_case ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_A = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Compute absolute learning rate
_A = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_A = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
_A = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
# Write model card and (optionally) push to hub
_A = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
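# Worked example of the linear learning-rate scaling used above:
# absolute_lr = base_lr * total_batch_size / 256. With the default base_lr=1e-3,
# a per-device batch of 64, gradient accumulation 2 and 2 processes,
# total_batch_size = 64 * 2 * 2 = 256, so absolute_lr stays at 1e-3.
def _absolute_lr(base_lr, per_device_batch, grad_accum, world_size):
    total_batch_size = per_device_batch * grad_accum * world_size
    return base_lr * total_batch_size / 256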
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
SCREAMING_SNAKE_CASE_ : Any = Rectangle(height=0.5 , width=0.5)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : List[Any] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Optional[Any] = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
SCREAMING_SNAKE_CASE_ : List[Any] = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
SCREAMING_SNAKE_CASE_ : int = VGroup(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
SCREAMING_SNAKE_CASE_ : List[str] = Text('''CPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Tuple = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
cpu.move_to([-2.5, -0.5, 0])
self.add(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [mem.copy() for i in range(1)]
SCREAMING_SNAKE_CASE_ : Any = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
SCREAMING_SNAKE_CASE_ : Any = Text('''GPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : str = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
gpu.align_to(UpperCamelCase__ , UpperCamelCase__)
gpu.set_x(gpu.get_x() - 1)
self.add(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VGroup(*UpperCamelCase__).arrange(UpperCamelCase__ , buff=0)
SCREAMING_SNAKE_CASE_ : Optional[Any] = Text('''Model''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Optional[int] = Group(UpperCamelCase__ , UpperCamelCase__).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__)
model.move_to([3, -1.0, 0])
self.play(
Create(UpperCamelCase__ , run_time=1) , Create(UpperCamelCase__ , run_time=1) , Create(UpperCamelCase__ , run_time=1) , )
SCREAMING_SNAKE_CASE_ : List[str] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
SCREAMING_SNAKE_CASE_ : Any = Square(side_length=2.2)
key.move_to([-5, 2, 0])
SCREAMING_SNAKE_CASE_ : Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(UpperCamelCase__ , run_time=2.5) , Write(UpperCamelCase__) , Write(UpperCamelCase__))
self.add(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : int = []
for i, rect in enumerate(UpperCamelCase__):
SCREAMING_SNAKE_CASE_ : str = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(UpperCamelCase__ , opacity=0.7)
cpu_target.move_to(UpperCamelCase__)
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.46 / 4
SCREAMING_SNAKE_CASE_ : int = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=UpperCamelCase__)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0)
cpu_targs.append(UpperCamelCase__)
first_animations.append(rect.animate(run_time=0.5).set_stroke(UpperCamelCase__))
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5))
self.play(*UpperCamelCase__)
self.play(*UpperCamelCase__)
self.wait()
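# Rendering note: a Scene like the one above is rendered from the command line
# with the manim CLI (flags assume the community edition), e.g.
#
#   manim -pql this_file.py Stage1
#
# where -p previews the output and -ql selects low render quality for fast
# iteration.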
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
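# Pattern sketch: hooks in a conftest.py are discovered by pytest automatically.
# A custom option gating slow tests would look like this (names illustrative;
# kept as a comment so it does not clash with the hooks defined above):
#
# def pytest_addoption(parser):
#     parser.addoption("--run-slow", action="store_true", default=False)
#
# def pytest_collection_modifyitems(config, items):
#     if not config.getoption("--run-slow"):
#         skip = pytest.mark.skip(reason="needs --run-slow")
#         for item in items:
#             if "slow" in item.keywords:
#                 item.add_marker(skip)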
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase,__lowerCamelCase=13,__lowerCamelCase=30,__lowerCamelCase=2,__lowerCamelCase=3,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=32,__lowerCamelCase=2,__lowerCamelCase=4,__lowerCamelCase=37,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=10,__lowerCamelCase=0.02,__lowerCamelCase=3,__lowerCamelCase=None,__lowerCamelCase=2,):
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 2
def UpperCamelCase ( self ):
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
return DeiTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__lowerCamelCase,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = TFDeiTModel(config=__lowerCamelCase )
A__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
A__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ = 1
A__ = TFDeiTForMaskedImageModeling(__lowerCamelCase )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = self.type_sequence_label_size
A__ = TFDeiTForImageClassification(__lowerCamelCase )
A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = TFDeiTForImageClassification(__lowerCamelCase )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ):
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ):
A__ = TFDeiTModelTester(self )
A__ = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=37 )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase,tf.keras.layers.Dense ) )
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1],__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=False ):
A__ = super()._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
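# Worked example of the sequence-length bookkeeping used by the tester above:
# DeiT prepends a [CLS] and a distillation token, so the encoder sequence length
# is (image_size // patch_size) ** 2 + 2. For the defaults (image_size=30,
# patch_size=2): (30 // 2) ** 2 + 2 = 227.
def _deit_seq_length(image_size=30, patch_size=2):
    return (image_size // patch_size) ** 2 + 2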
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = (DPMSolverSinglestepScheduler,)
__SCREAMING_SNAKE_CASE = (('''num_inference_steps''', 25),)
def UpperCamelCase ( self,**__lowerCamelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__lowerCamelCase )
return config
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ = sample, sample
for t in range(__lowerCamelCase,time_step + scheduler.config.solver_order + 1 ):
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self,__lowerCamelCase=None,**__lowerCamelCase ):
if scheduler is None:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = 50
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCamelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
A__ = DEISMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ = UniPCMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=__lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCamelCase,prediction_type=__lowerCamelCase,sample_max_value=__lowerCamelCase,algorithm_type='''dpmsolver++''',solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,)
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCamelCase ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
A__ = self.full_loop(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
assert not torch.isnan(__lowerCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=__lowerCamelCase )
self.check_over_configs(lower_order_final=__lowerCamelCase )
def UpperCamelCase ( self ):
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase ( self ):
self.check_over_configs(variance_type=__lowerCamelCase )
self.check_over_configs(variance_type='''learned_range''' )
def UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCamelCase,time_step=0 )
def UpperCamelCase ( self ):
A__ = self.full_loop()
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''' )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''',use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=__lowerCamelCase,dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
        assert sample.dtype == torch.float16
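# A hedged usage sketch of the scheduler under test outside the harness; it is
# driven exactly like `full_loop` above. `unet` stands in for a real
# noise-prediction model and the helper name is illustrative.
def _example_singlestep_loop(unet, sample, num_inference_steps=25):
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        noise_pred = unet(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample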
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
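
# The file above follows the transformers lazy-import convention: the import structure
# is declared up front and real imports are deferred until attribute access. A minimal
# sketch of the same idea using PEP 562 module-level __getattr__ in a package __init__;
# the module and attribute names here are hypothetical, not part of transformers.
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    # resolve the attribute lazily, importing its submodule only on first use
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")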
| 356 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the DINO checkpoint's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
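
# Once converted and saved, the checkpoint can be loaded back through the standard
# transformers API. A small usage sketch; the local path and image file are placeholders.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("path/to/converted/dino_vitb16")  # placeholder path
model = ViTForImageClassification.from_pretrained("path/to/converted/dino_vitb16")

image = Image.open("cat.jpg")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])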
| 28 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
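
# The fp16 branch above relies on a pre-published "bf16" revision of the checkpoint.
# When no such revision exists, Flax models can be down-cast after loading. A sketch
# using the to_bf16 casting helper that diffusers' Flax model mixin exposes (assuming
# the fp32 checkpoint is available locally or on the Hub):
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet"
)
params = model.to_bf16(params)  # every floating-point leaf becomes jnp.bfloat16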
| 322 |
def count_inversions_bf(arr):
    """
    Counts the number of inversions using a naive brute-force algorithm.
    Time complexity: O(n^2)
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """
    Counts the number of inversions using a divide-and-conquer algorithm.
    Time complexity: O(n log n)
    Returns the sorted array and the number of inversions.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """
    Counts the inversions across two sorted arrays and merges the two arrays
    into one sorted array.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
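
# The divide-and-conquer version runs in O(n log n) versus the brute force O(n^2).
# For comparison, a sketch of the same count with a Fenwick (binary indexed) tree over
# coordinate-compressed values; this alternative is not part of the original file.
def count_inversions_fenwick(arr):
    # count pairs (i, j) with i < j and arr[i] > arr[j] in O(n log n)
    ranks = {v: r + 1 for r, v in enumerate(sorted(set(arr)))}  # 1-based ranks
    tree = [0] * (len(ranks) + 1)
    inversions = 0
    for x in reversed(arr):
        i = ranks[x] - 1
        while i > 0:  # count already-seen elements strictly smaller than x
            inversions += tree[i]
            i -= i & -i
        i = ranks[x]
        while i < len(tree):  # record x as seen
            tree[i] += 1
            i += i & -i
    return inversions

assert count_inversions_fenwick([10, 2, 1, 5, 5, 2, 11]) == 8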
| 322 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
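
# set_recursively above walks a dotted key with repeated getattr before assigning the
# tensor. The same traversal pattern in isolation, as a generic sketch independent of
# transformers; the toy classes are hypothetical.
import functools

def rgetattr(obj, dotted_key):
    # resolve "a.b.c" against obj via repeated getattr
    return functools.reduce(getattr, dotted_key.split("."), obj)

class Leaf:
    weight = "w"

class Root:
    leaf = Leaf()

assert rgetattr(Root(), "leaf.weight") == "w"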
| 359 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
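
# Several set_param calls above transpose with .transpose(0, 1) because trax stores
# dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features). A toy sketch of that layout mismatch, assuming the
# (in, out) trax convention the transposes in this script imply:
import numpy as np
import torch
from torch import nn

trax_kernel = np.random.randn(16, 4).astype(np.float32)  # trax-style: (in_features, out_features)
linear = nn.Linear(16, 4)  # torch weight shape: (out_features, in_features) == (4, 16)
linear.weight = nn.Parameter(torch.tensor(trax_kernel).transpose(0, 1).contiguous())
assert linear.weight.shape == (4, 16)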
| 8 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
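
# Shims like this keep an old import path alive while pointing users at the new
# location. diffusers' deprecate helper boils down to a guarded warnings.warn; a
# stdlib-only sketch of the same pattern (message text is illustrative):
import warnings

warnings.warn(
    "This module is deprecated; import FlaxStableDiffusionControlNetPipeline from `diffusers` directly.",
    FutureWarning,
    stacklevel=2,
)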
| 54 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target; data is a sklearn Bunch with "data" and "target"
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    # Fit an XGBoost classifier on the given features/target
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """
    Train an XGBoost classifier on the iris dataset and plot its confusion matrix.
    """
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
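
# The confusion matrix gives a per-class view; a scalar check can be added with
# sklearn's metrics. A self-contained sketch, re-deriving the same splits rather
# than reusing main()'s locals:
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

data = load_iris()
x_train, x_test, y_train, y_test = train_test_split(data["data"], data["target"], test_size=0.25)
classifier = XGBClassifier().fit(x_train, y_train)
print("accuracy:", accuracy_score(y_test, classifier.predict(x_test)))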
| 227 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """
    Adapted from RobertaTokenizer and XLNetTokenizer. Based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
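
# The fairseq_offset bookkeeping above shifts every SentencePiece id so the first
# "real" piece lands after the control and [unusedN] slots. A toy illustration of
# the shift; the values mirror the comment table in __init__, not a real model.
fairseq_offset = 12
fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
    fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i

def spm_to_fairseq(spm_id):
    # piece 3 (",") in the spm vocab becomes id 15 in the embedding vocab
    return spm_id + fairseq_offset

assert spm_to_fairseq(3) == 15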
| 368 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
"""simple docstring"""
_snake_case : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_snake_case : Dict = flax_model.params["""params"""]
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(snake_case__ )
_snake_case : Tuple = {}
_snake_case : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[int] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : int = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_snake_case : Union[str, Any] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
"""simple docstring"""
import torch
# Load the index
_snake_case : str = {}
for shard_file in shard_filenames:
# load using msgpack utils
_snake_case : Union[str, Any] = torch.load(snake_case__ )
_snake_case : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : str = flax_model.params["""params"""]
_snake_case : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
_snake_case : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : List[str] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : Optional[Any] = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : List[str] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_snake_case : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
_snake_case : Any = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
_snake_case : Dict = flatten_dict(snake_case__ )
_snake_case : Optional[Any] = pt_model.state_dict()
_snake_case : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_snake_case : Optional[int] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_snake_case : str = []
_snake_case : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_snake_case : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix
_snake_case : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : Union[str, Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
_snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
_snake_case : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_snake_case : int = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_snake_case : Tuple = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
_snake_case : Optional[int] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
_snake_case : int = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_snake_case : int = """.""".join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_snake_case : Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_snake_case : List[str] = key.split(""".""" )
_snake_case : Optional[int] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_snake_case : int = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
_snake_case : Union[str, Any] = key_components[-2] + """_v"""
if name is not None:
_snake_case : Dict = key_components[:-3] + [name]
_snake_case : Dict = """.""".join(snake_case__ )
_snake_case : str = key
if flax_key in special_pt_names:
_snake_case : Union[str, Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
_snake_case : List[str] = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
_snake_case : List[Any] = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
_snake_case : List[str] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(snake_case__ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
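
# The kernel handling above encodes the layout difference between the frameworks:
# Flax convolutions store kernels as (H, W, in, out) while PyTorch uses (out, in, H, W),
# and dense kernels are simply transposed. A small shape-only sketch of both rules:
import numpy as np

pt_conv_weight = np.zeros((8, 3, 5, 5), dtype=np.float32)  # torch conv: (out, in, H, W)
flax_conv_kernel = pt_conv_weight.transpose(2, 3, 1, 0)    # flax conv: (H, W, in, out)
assert flax_conv_kernel.shape == (5, 5, 3, 8)

pt_linear_weight = np.zeros((4, 16), dtype=np.float32)     # torch linear: (out, in)
flax_dense_kernel = pt_linear_weight.T                     # flax dense: (in, out)
assert flax_dense_kernel.shape == (16, 4)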
| 132 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
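

# Hedged aside: the `num_attention_heads / 2` factor asserted in the tests above
# comes from ConvBERT's head_ratio (2 by default) -- that fraction of the heads
# is replaced by the span-based dynamic convolution branch, so only half of the
# heads appear in the attention maps. A quick check of this assumption:
if __name__ == "__main__":
    cfg = ConvBertConfig(num_attention_heads=4, head_ratio=2)
    print(cfg.num_attention_heads // cfg.head_ratio)  # heads left in self-attention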
| 75 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4 )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3 )

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output), )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T, numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output), self.second_hidden_layer_and_output_layer_weights.T, )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ), )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T, numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output), self.second_hidden_layer_and_output_layer_weights.T, )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ), self.first_hidden_layer_and_second_hidden_layer_weights.T, )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer), )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(F"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value) -> numpy.ndarray:
    '''simple docstring'''
    return (value) * (1 - (value))
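

# Quick numeric check (illustrative): the derivative helper expects the already
# activated value as input. sigmoid(0) = 0.5, so the slope at x = 0 is 0.5 * 0.5 = 0.25.
assert sigmoid_derivative(sigmoid(numpy.float64(0.0))) == 0.25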
def example() -> int:
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ), dtype=numpy.float64, )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 318 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 358 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs, ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    " [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
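

# Hedged usage sketch of the pipeline above; "gpt2" is only an example
# checkpoint, and running this requires network access to download weights.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])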
| 184 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
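

# Hedged usage sketch mirroring the integration test above; it hits the same
# public "facebook/mbart-large-en-ro" checkpoint and needs network access.
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
    mbart = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
    batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
    ids = mbart.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tok.batch_decode(ids, skip_special_tokens=True))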
| 50 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
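

# Hedged usage sketch; the checkpoint is a real public one, but the video URL
# below is purely illustrative, and this call needs `decord` plus network access.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    print(classifier("https://example.com/archery.mp4", top_k=3))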
| 28 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
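

# --- Illustrative aside -------------------------------------------------------
# A self-contained numeric sketch (assumptions: a toy denoiser and scalar sigmas)
# of the Euler step + 2nd order correction performed in the sampling loop above.
if __name__ == "__main__":
    import numpy as np

    def _heun_step(sample, sigma_hat, sigma_prev, denoise):
        # dx/dsigma evaluated at sigma_hat
        derivative = (sample - denoise(sample, sigma_hat)) / sigma_hat
        sample_prev = sample + (sigma_prev - sigma_hat) * derivative
        if sigma_prev != 0:
            # re-evaluate the slope at sigma_prev and average the two (trapezoidal rule)
            derivative_prev = (sample_prev - denoise(sample_prev, sigma_prev)) / sigma_prev
            sample_prev = sample + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_prev)
        return sample_prev

    print(_heun_step(np.ones(3), 1.0, 0.5, lambda x, s: 0.9 * x))  # toy denoiser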
| 334 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
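

# Sanity checks (illustrative) for the alternative compositions noted in the
# comments above; they agree with the implementations on a small matrix.
assert rotate_90(make_matrix(2)) == transpose(reverse_column(make_matrix(2)))
assert rotate_180(make_matrix(2)) == reverse_column(reverse_row(make_matrix(2)))
assert rotate_270(make_matrix(2)) == transpose(reverse_row(make_matrix(2)))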
if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
| 334 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
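# Example invocation (sketch; the script filename is illustrative):
#
#     python convert_deta_swin_to_hf.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub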
| 76 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
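# Usage sketch (illustrative): instantiating the config with defaults and
# overriding a couple of fields.
#
#     config = ViTMSNConfig(image_size=384, patch_size=32)
#     assert config.model_type == "vit_msn"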
| 8 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase_ = "src/transformers"
lowercase_ = "docs/source/en/tasks"
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A__ = f.readlines()
# Find the start prompt.
A__ = 0
while not lines[start_index].startswith(SCREAMING_SNAKE_CASE__ ):
start_index += 1
start_index += 1
A__ = start_index
while not lines[end_index].startswith(SCREAMING_SNAKE_CASE__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the formatted list of models supporting a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 282 |
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a byte string."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes)
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes and append its bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over all 64-byte blocks."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check the implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash a string or a file's contents with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
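# Quick sanity check (sketch), mirroring the unit test above:
#
#     >>> import hashlib
#     >>> SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#     True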
| 282 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
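# Usage sketch (assumes IIRFilter.process(sample) from audio_filters.iir_filter,
# which filters one sample at a time):
#
#     filt = make_lowpass(1000, 48000)
#     filtered = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.25)]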
| 225 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    # NOTE: the test method names below are plausible reconstructions; the
    # original names were not recoverable from this copy.
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 132 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector with basic operations, backed by a list of floats."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a 1 at index `pos` (and 0 elsewhere)."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes scalar * x + y (the BLAS 'axpy' operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a random vector of size n with integer components between a and b."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
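# Worked example: axpy(2, x, y) scales x by 2 and adds y component-wise.
#
#     x = Vector([1, 2, 3])
#     y = Vector([1, 0, 1])
#     print(axpy(2, x, y))  # (3,4,7)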
class Matrix:
    """A simple w x h matrix with basic operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Returns a square zero-matrix of dimension n x n."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a random matrix with integer components between a and b."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 253 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits of num! (for the default input, 648)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 253 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
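# Usage sketch (illustrative): a default (B0-sized) config and its ONNX helper.
#
#     config = SegformerConfig()
#     onnx_config = SegformerOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])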
| 209 |
class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 neighbours of the current cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
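# Worked example: two 8-connected islands in a 3x4 grid (the lone cell at the
# bottom right is not diagonally adjacent to the top-left island).
#
#     grid = [
#         [1, 1, 0, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#     ]
#     print(matrix(3, 4, grid).count_islands())  # 2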
| 184 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
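# A minimal sketch of what `find_executable_batch_size` does (illustrative, not
# the library implementation): retry the wrapped function, halving the batch
# size whenever it raises an out-of-memory error.
#
#     def find_executable_batch_size_sketch(function, starting_batch_size=128):
#         def wrapper():
#             batch_size = starting_batch_size
#             while batch_size > 0:
#                 try:
#                     return function(batch_size)
#                 except RuntimeError as e:  # the real impl checks for CUDA OOM specifically
#                     if "out of memory" in str(e):
#                         batch_size //= 2
#                     else:
#                         raise
#             raise RuntimeError("No executable batch size found.")
#         return wrapper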
| 359 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing one forward pass with input `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet trunk so it mimics what vissl does (no classifier head)."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """A dictionary with some additional logic to return the function that creates the correct original model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """A dictionary with some additional logic to return the correct Hugging Face RegNet class reference."""

    def __getitem__(self, x: str):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func,
    our_model_func,
    config,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ),
}
SCREAMING_SNAKE_CASE =NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE =NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCAmelCase_, lowerCAmelCase_ ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' )
SCREAMING_SNAKE_CASE =model_func()
        # the checkpoint stores trunk and head weights separately; load the trunk and return the heads
SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE =model_state_dict['trunk']
model.load_state_dict(lowerCAmelCase_ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
    RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
# IN1K finetuned
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
SCREAMING_SNAKE_CASE =partial(
lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
    RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, )
return config, expected_shape
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
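# Example invocation (hypothetical script name; the flags match the argparse
# setup above):
#
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-dump
#
# Caveat: argparse's `type=bool` converts any non-empty string (including
# "False") to True, so --push_to_hub is effectively controlled only by its
# default value here.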
| 334 | 1 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def a ( ):
'''simple docstring'''
A_ : int = 9
A_ : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : int = kruskal(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase__ ) == sorted(lowerCamelCase__ )
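# A self-contained sketch of Kruskal's algorithm with union-find, assuming the
# imported `kruskal(num_nodes, edges)` takes the same [u, v, weight] edge
# format used in the test above. Illustrative only; the real import may differ.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge connects two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst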
| 135 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase :Union[str, Any] = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    __SCREAMING_SNAKE_CASE : bool = field(default=__UpperCAmelCase , metadata={'help': 'Whether to freeze the encoder.'} )
__SCREAMING_SNAKE_CASE : bool = field(default=__UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=__UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
__SCREAMING_SNAKE_CASE : Optional[int] = field(default=__UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=__UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info(f'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(f' {key} = {metrics[key]}' )
save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , f'{split}_results.json' ) )
def a ( ):
'''simple docstring'''
A_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_, A_, A_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_, A_, A_ : List[str] = parser.parse_args_into_dataclasses()
check_output_dir(lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ : int = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
assert hasattr(lowerCamelCase__ , lowerCamelCase__ ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCamelCase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
A_ : int = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCamelCase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
A_ : List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCamelCase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
A_ : Union[str, Any] = SeqaSeqDataset
# Get datasets
A_ : Union[str, Any] = (
dataset_class(
lowerCamelCase__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
A_ : int = (
dataset_class(
lowerCamelCase__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
A_ : Tuple = (
dataset_class(
lowerCamelCase__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
A_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , lowerCamelCase__ ) if training_args.predict_with_generate else None
)
A_ : List[str] = SeqaSeqTrainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , data_args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , data_collator=SeqaSeqDataCollator(
lowerCamelCase__ , lowerCamelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , )
A_ : str = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
A_ : List[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
A_ : Any = train_result.metrics
A_ : str = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A_ : Tuple = trainer.evaluate(metric_key_prefix="""val""" )
A_ : str = data_args.n_val
A_ : Optional[Any] = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
A_ : Any = trainer.predict(test_dataset=lowerCamelCase__ , metric_key_prefix="""test""" )
A_ : int = test_output.metrics
A_ : Tuple = data_args.n_test
if trainer.is_world_process_zero():
A_ : List[Any] = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
if training_args.predict_with_generate:
A_ : List[Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
A_ : Tuple = lmap(str.strip , lowerCamelCase__ )
write_txt_file(lowerCamelCase__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowerCamelCase__ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def a ( lowerCamelCase__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 135 | 1 |
def a_ ( hex_num : str ) -> int:
    # Convert a hexadecimal string to its binary digits, returned as an int
    # (e.g. "AC" -> 10101100); negative inputs keep their sign.
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str )
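# Doctest-style usage sketch for the converter above:
#
#   >>> a_("AC")
#   10101100
#   >>> a_("-8")
#   -1000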
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase )
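# A short usage sketch of the round-trip exercised by the tests above. The
# checkpoint is the tiny test model used in this file; upstream the auto class
# is spelled `AutoModelForSeq2SeqLM`, and the `optimum` package must be
# installed for the BetterTransformer conversion to work.
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap in fused attention kernels
#   outputs = model.generate(**inputs)         # fast inference path
#   model = model.reverse_bettertransformer()  # restore the canonical layout
#   model.save_pretrained("my-model")          # only the canonical layout serializes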
| 282 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''lxmert'''
_lowercase : Any = {}
def __init__( self: Any , UpperCamelCase_: List[Any]=30_522 , UpperCamelCase_: int=768 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Dict=9_500 , UpperCamelCase_: List[Any]=1_600 , UpperCamelCase_: List[Any]=400 , UpperCamelCase_: List[str]=3_072 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: List[str]=512 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Dict=1E-1_2 , UpperCamelCase_: List[Any]=9 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[Any]=5 , UpperCamelCase_: str=2_048 , UpperCamelCase_: Dict=4 , UpperCamelCase_: Any=6.67 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Any=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=True , **UpperCamelCase_: Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = num_qa_labels
lowercase__ = num_object_labels
lowercase__ = num_attr_labels
lowercase__ = l_layers
lowercase__ = x_layers
lowercase__ = r_layers
lowercase__ = visual_feat_dim
lowercase__ = visual_pos_dim
lowercase__ = visual_loss_normalizer
lowercase__ = task_matched
lowercase__ = task_mask_lm
lowercase__ = task_obj_predict
lowercase__ = task_qa
lowercase__ = visual_obj_loss
lowercase__ = visual_attr_loss
lowercase__ = visual_feat_loss
lowercase__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**UpperCamelCase_ )
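# Minimal usage sketch. Upstream this class is `transformers.LxmertConfig`;
# the keyword names below are the upstream ones, since the parameter names in
# the signature above are obfuscated.
#
#   from transformers import LxmertConfig
#   config = LxmertConfig(num_qa_labels=9500, l_layers=9, x_layers=5, r_layers=5)
#   config.num_attention_heads  # -> 12 (default)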
| 93 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ) -> Any:
"""simple docstring"""
lowercase__ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase__ = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowercase__ = get_activation('''gelu''' )
lowercase__ = get_activation('''gelu_10''' )
lowercase__ = torch_builtin(UpperCamelCase_ )
lowercase__ = geluaa(UpperCamelCase_ )
lowercase__ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation('''bogus''' )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = get_activation('''gelu''' )
lowercase__ = 1
lowercase__ = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = acta.a
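# A small self-contained check of the relationship the first test relies on:
# the exact (erf-based) GELU and the tanh approximation agree closely but not
# bitwise. Pure torch; the tolerance below is an assumption for illustration,
# and the helper is never called by the test suite.
def _gelu_variants_demo():
    import math

    x = torch.linspace(-3, 3, steps=7)
    gelu_exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    gelu_tanh = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
    assert torch.allclose(gelu_exact, gelu_tanh, atol=1e-2)
    assert not torch.equal(gelu_exact, gelu_tanh)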
| 93 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCAmelCase : List[Any] = datasets.load_iris()
lowerCAmelCase : List[str] = np.array(data['data'])
lowerCAmelCase : Any = np.array(data['target'])
lowerCAmelCase : Dict = data['target_names']
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = train_test_split(X, y)
def A_ ( a , a ):
"""simple docstring"""
return np.linalg.norm(np.array(a ) - np.array(a ) )
def A_ ( a , a , a , a , a=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = zip(a , a )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE_ : List[Any] = []
for data_point in data:
SCREAMING_SNAKE_CASE_ : Optional[int] = euclidean_distance(data_point[0] , a )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE_ : List[str] = [i[1] for i in sorted(a )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE_ : List[Any] = Counter(a ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
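# A dependency-free sketch of the same procedure for clarity: Euclidean
# distance, pick the k nearest, majority vote. The points below are made up.
def knn_sketch(train_points, train_labels, query, k=3):
    dists = sorted(
        (sum((a - b) ** 2 for a, b in zip(point, query)) ** 0.5, label)
        for point, label in zip(train_points, train_labels)
    )
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]

# knn_sketch([(0, 0), (0, 1), (5, 5), (6, 5)], ["a", "a", "b", "b"], (1, 0)) -> "a"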
| 253 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCAmelCase : str = TypeVar('T')
lowerCAmelCase : Optional[Any] = Union[List[T], Tuple[T, ...]]
lowerCAmelCase : str = Union[T, List[T], Dict[str, T]]
lowerCAmelCase : Union[str, Any] = Union[str, bytes, os.PathLike]
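# Illustration of how these aliases read in a signature. Upstream the names are
# T, ListLike, NestedDataStructureLike and PathLike; here they are obfuscated,
# so the sketch below uses the upstream names (hypothetical function):
#
#   def load_examples(path: PathLike, batch: NestedDataStructureLike[int]) -> ListLike[int]:
#       ...
#
# PathLike accepts str, bytes or os.PathLike; NestedDataStructureLike[T]
# accepts a bare T, a list of T, or a dict mapping str to T.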
| 253 | 1 |
import sys
from collections import defaultdict
class __lowercase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
return self.node_position[vertex]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = pos
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE_ : Optional[int] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 * start + 1
else:
SCREAMING_SNAKE_CASE_ : Any = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE_ : Any = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp, tempa
SCREAMING_SNAKE_CASE_ : int = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __lowerCamelCase )
self.top_to_bottom(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE_ : Any = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = heap[parent]
SCREAMING_SNAKE_CASE_ : Optional[int] = position[parent]
self.set_position(position[parent] , __lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp
self.set_position(__lowerCamelCase , __lowerCamelCase )
break
SCREAMING_SNAKE_CASE_ : List[str] = parent
else:
SCREAMING_SNAKE_CASE_ : List[str] = val
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp
self.set_position(__lowerCamelCase , 0 )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = len(__lowerCamelCase ) // 2 - 1
for i in range(__lowerCamelCase , -1 , -1 ):
self.top_to_bottom(__lowerCamelCase , __lowerCamelCase , len(__lowerCamelCase ) , __lowerCamelCase )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = positions[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sys.maxsize
self.top_to_bottom(__lowerCamelCase , 0 , len(__lowerCamelCase ) , __lowerCamelCase )
return temp
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : int = Heap()
SCREAMING_SNAKE_CASE_ : Dict = [0] * len(A__ )
SCREAMING_SNAKE_CASE_ : str = [-1] * len(A__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE_ : Any = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE_ : Dict = []
for vertex in range(len(A__ ) ):
distance_tv.append(sys.maxsize )
positions.append(A__ )
heap.node_position.append(A__ )
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : List[Any] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = distance
heap.heapify(A__, A__ )
for _ in range(1, len(A__ ) ):
SCREAMING_SNAKE_CASE_ : Tuple = heap.delete_minimum(A__, A__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE_ : Dict = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(A__ )]
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = distance
heap.bottom_to_top(
A__, heap.get_position(A__ ), A__, A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowerCAmelCase__ : Dict =int(input('Enter number of edges: ').strip())
lowerCAmelCase__ : List[str] =defaultdict(list)
for _ in range(edges_number):
lowerCAmelCase__ : Any =[int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
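# An equivalent, much shorter Prim's implementation using the standard-library
# heap instead of the hand-rolled one above. Assumes the same adjacency-list
# format (adjacency_list[u] is a list of [v, weight] pairs) and a connected
# graph whose vertices all appear as keys.
import heapq


def prim_sketch(adjacency_list, start=0):
    visited = {start}
    frontier = [(weight, start, v) for v, weight in adjacency_list[start]]
    heapq.heapify(frontier)
    mst_edges = []
    while frontier and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst_edges.append((u, v))
        for nxt, nxt_weight in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (nxt_weight, v, nxt))
    return mst_edges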
| 368 |
from __future__ import annotations
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = key
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(lowerCAmelCase__ ) ^ key ) for ch in content]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(lowerCAmelCase__ ) ^ key ) for ch in content]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE_ : Dict = ''
for ch in content:
ans += chr(ord(lowerCAmelCase__ ) ^ key )
return ans
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE_ : str = ''
for ch in content:
ans += chr(ord(lowerCAmelCase__ ) ^ key )
return ans
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
try:
with open(lowerCAmelCase__ ) as fin, open('encrypt.out' , 'w+' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCAmelCase__ , lowerCAmelCase__ ) )
except OSError:
return False
return True
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
try:
with open(lowerCAmelCase__ ) as fin, open('decrypt.out' , 'w+' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCAmelCase__ , lowerCAmelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Union[str, Any] , *__lowercase : Dict , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Optional[int] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : List[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : int , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : List[Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : List[Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : Dict , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Optional[Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : Dict , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__lowercase : Any , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Any , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : List[Any] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Union[str, Any] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Union[str, Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : List[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Union[str, Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[int] , *__lowercase : str , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : str , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[int] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[int] , *__lowercase : Any , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Union[str, Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Union[str, Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : Tuple , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : List[Any] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Dict , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : int , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Dict , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Union[str, Any] , *__lowercase : Union[str, Any] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Optional[int] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Optional[Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
def lowerCamelCase__ ( *_A , **_A ):
'''simple docstring'''
requires_backends(lowerCamelCase_ , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : int , *__lowercase : Any , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : str , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Union[str, Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : int , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : Any , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : str , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : int , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : Dict , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : str , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : int , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : Optional[int] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Dict , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Dict , *__lowercase : List[str] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Any , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : Optional[Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : str , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : str , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : Dict , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : Tuple , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[int] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : Any , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Optional[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : List[Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Tuple , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Union[str, Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : int , *__lowercase : Dict , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : List[str] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : Dict , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Any , *__lowercase : Tuple , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Dict , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Optional[Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : int , **__lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Any , *__lowercase : Any , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : int , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Union[str, Any] , *__lowercase : Optional[int] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Any , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Optional[Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : List[Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__lowercase : Optional[Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : int , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Union[str, Any] , *__lowercase : Tuple , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : Optional[int] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : List[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : Any , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : Union[str, Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : int , *__lowercase : Dict , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : List[Any] , **__lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[int] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : List[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : int , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : int , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Union[str, Any] , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : str , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Tuple , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : str , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : int , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : int , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : Optional[Any] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Tuple , *__lowercase : Optional[int] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : Dict , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : int , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : List[Any] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : int , *__lowercase : Optional[Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Union[str, Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Optional[Any] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Tuple , *__lowercase : Optional[int] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : Tuple , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : Tuple , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[Any] , *__lowercase : Optional[int] , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Dict , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : Optional[Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__lowercase : int , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : List[str] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : List[str] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[int] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Dict , *__lowercase : str , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Dict , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Dict , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[Any] , *__lowercase : int , **__lowercase : int ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Union[str, Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : int , *__lowercase : Dict , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__lowercase : Tuple , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Any , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Any , *__lowercase : Tuple , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : List[Any] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__lowercase : str , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[int] , *__lowercase : Optional[int] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : List[str] , *__lowercase : Any , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Any , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : int , *__lowercase : Dict , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Optional[int] , *__lowercase : Union[str, Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[Any] , *__lowercase : List[str] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : str , *__lowercase : List[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Optional[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Dict , *__lowercase : Any , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Dict , *__lowercase : Dict , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__lowercase : Tuple , **__lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : Any , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase ( metaclass=_a ):
'''simple docstring'''
lowerCAmelCase_ = ["""torch"""]
def __init__( self : Dict , *__lowercase : Dict , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def snake_case__ ( cls : str , *__lowercase : Union[str, Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def snake_case__ ( cls : List[str] , *__lowercase : Any , **__lowercase : str ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
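# Illustrative sketch of the dummy-object pattern used throughout this block.
# Assumptions: `DummyObject` and `requires_backends` behave like their
# transformers.utils counterparts, and `MyTorchOnlyModel` is a hypothetical name.
from transformers.utils import DummyObject, requires_backends

class MyTorchOnlyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError naming the missing backend when torch is absent;
        # otherwise instantiation proceeds normally.
        requires_backends(self, ["torch"])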
| 187 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class that stores the hyper-parameters of a Deformable DETR model."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        """Serializes this instance to a Python dictionary, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
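# Usage sketch for the config above (assumption: it mirrors
# transformers.DeformableDetrConfig, so the defaults below follow __init__):
config = DeformableDetrConfig(num_queries=100, two_stage=False)
print(config.d_model)                   # 256 (default)
print(config.num_attention_heads)       # 8, resolved via the property above
print(config.to_dict()["model_type"])   # "deformable_detr"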
| 21 | 0 |
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory used by argparse to instantiate the command from parsed args."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = (
    "\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch; this requires\n"
    "TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
)
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the root argparse parser of the transformers CLI."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
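# Example invocation of the command registered above (hypothetical file paths;
# the flags match the arguments added in register_subcommand):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin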
| 361 |
# A bipartite graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to one from V
# or vice versa. In other words, for every edge (u, v), either u belongs to U and v
# to V, or u belongs to V and v to U. Equivalently, no edge connects two vertices
# of the same set.
def check_bipartite_dfs(graph) -> bool:
    """2-color the graph via depth-first search; it is bipartite iff no edge joins two same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
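# A quick negative check: any odd cycle, such as a triangle, is not bipartite,
# so the function should return False here.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False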
| 292 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class _snake_case ( a__ ):
snake_case__ = "blip_text_model"
def __init__( self : Optional[Any] , UpperCAmelCase : int=30524 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : List[Any]=768 , UpperCAmelCase : Optional[Any]=3072 , UpperCAmelCase : Dict=768 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Dict=8 , UpperCAmelCase : Optional[int]=512 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Dict=1E-12 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : Tuple=30522 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Optional[int]=102 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=True , **UpperCAmelCase : Optional[int] , ):
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , sep_token_id=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : Tuple = encoder_hidden_size
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : int = projection_dim
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCamelCase : int = is_decoder
__lowerCamelCase : Optional[int] = use_cache
@classmethod
def lowerCamelCase__ ( cls : str , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : List[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
__lowerCamelCase : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _snake_case ( a__ ):
snake_case__ = "blip_vision_model"
def __init__( self : Any , UpperCAmelCase : List[str]=768 , UpperCAmelCase : List[str]=3072 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : str=12 , UpperCAmelCase : List[str]=384 , UpperCAmelCase : Union[str, Any]=16 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : str=1E-5 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Optional[int]=1E-10 , **UpperCAmelCase : int , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Dict = hidden_size
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : Optional[Any] = projection_dim
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : List[Any] = patch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Optional[int] = attention_dropout
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : Any = hidden_act
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : List[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Tuple = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
__lowerCamelCase : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _snake_case ( a__ ):
snake_case__ = "blip"
snake_case__ = True
def __init__( self : str , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=512 , UpperCAmelCase : Union[str, Any]=2.6_5_9_2 , UpperCAmelCase : Any=256 , **UpperCAmelCase : Optional[int] , ):
super().__init__(**UpperCAmelCase )
if text_config is None:
__lowerCamelCase : int = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
__lowerCamelCase : Optional[Any] = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
__lowerCamelCase : List[str] = BlipTextConfig(**UpperCAmelCase )
__lowerCamelCase : str = BlipVisionConfig(**UpperCAmelCase )
__lowerCamelCase : int = self.vision_config.hidden_size
__lowerCamelCase : Dict = projection_dim
__lowerCamelCase : Optional[Any] = logit_scale_init_value
__lowerCamelCase : Optional[Any] = 1.0
__lowerCamelCase : Optional[int] = 0.0_2
__lowerCamelCase : Tuple = image_text_hidden_size
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , UpperCAmelCase : BlipTextConfig , UpperCAmelCase : BlipVisionConfig , **UpperCAmelCase : List[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Optional[int] = self.text_config.to_dict()
__lowerCamelCase : List[Any] = self.vision_config.to_dict()
__lowerCamelCase : List[Any] = self.__class__.model_type
return output
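# Composition sketch for the configs above (assumption: they mirror the
# transformers BLIP configs, so the two sub-configs compose as in __init__):
text_config = BlipTextConfig(num_hidden_layers=6)
vision_config = BlipVisionConfig(image_size=224)
blip_config = BlipConfig.from_text_vision_configs(text_config, vision_config)
# encoder_hidden_size is tied to the vision tower's hidden size in BlipConfig.__init__:
print(blip_config.text_config.encoder_hidden_size == blip_config.vision_config.hidden_size)  # True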
| 135 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LayoutLMv3FeatureExtractor''']
__A = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
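# Sketch of the lazy-import behavior this __init__ sets up (assumption: the
# package is installed as transformers.models.layoutlmv3). Submodules listed in
# _import_structure are only imported when one of their names is first accessed:
import importlib

layoutlmv3 = importlib.import_module("transformers.models.layoutlmv3")
config_cls = layoutlmv3.LayoutLMv3Config  # first access triggers loading configuration_layoutlmv3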
| 135 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Builds Transformer inputs of shape (n_batch, n_alternative, length), where for each alternative:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
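# Hypothetical launch command for the script above (dataset paths are
# placeholders; the flags match the argparse definitions in main()):
#
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset data/rocstories/train.csv \
#       --eval_dataset data/rocstories/val.csv \
#       --output_dir out/ --train_batch_size 8 --num_train_epochs 3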
| 358 |
__A = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
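# The constants above look like hand-tuned inference timestep schedules
# (strictly decreasing, ending at 0); note that each assignment to __A
# overwrites the previous list. A minimal sanity check under that assumption:
def is_valid_schedule(timesteps):
    # every step must be strictly smaller than its predecessor,
    # and the schedule must terminate at timestep 0
    return all(a > b for a, b in zip(timesteps, timesteps[1:])) and timesteps[-1] == 0

print(is_valid_schedule(__A))  # checks the last schedule defined above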
| 75 | 0 |