| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCAmelCase : int = False
UpperCAmelCase : int = True
UpperCAmelCase : List[Any] = False
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCAmelCase : Any = parser.parse_args()
UpperCAmelCase : Any = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
UpperCAmelCase : int = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
UpperCAmelCase : Optional[Any] = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
UpperCAmelCase : Optional[int] = reader.read()
UpperCAmelCase : Union[str, Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
UpperCAmelCase : Optional[Any] = UNetaDModel(**config)
else:
UpperCAmelCase : Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
UpperCAmelCase : int = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCAmelCase : Dict = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCAmelCase : Dict = config[key]
del config[key]
UpperCAmelCase : List[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
UpperCAmelCase : List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
UpperCAmelCase : List[Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
UpperCAmelCase : Tuple = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
UpperCAmelCase : Any = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
UpperCAmelCase : Optional[Any] = param_value
UpperCAmelCase : Dict = True
if not has_changed:
UpperCAmelCase : Optional[int] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
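
    # --- Added example, not part of the original script ---
    # A minimal sketch of the config-renaming pass above on a toy dict (the
    # values are made up), so the old-key -> new-key mapping is easy to eyeball.
    _demo_renames = {"image_size": "sample_size", "num_res_blocks": "layers_per_block", "resnet_eps": "norm_eps"}
    _demo_config = {"image_size": 64, "num_res_blocks": 2, "resnet_eps": 1e-6}
    for _old, _new in _demo_renames.items():
        if _old in _demo_config:
            _demo_config[_new] = _demo_config.pop(_old)
    assert _demo_config == {"sample_size": 64, "layers_per_block": 2, "norm_eps": 1e-6}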
| code_codestyle: 115 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : List[str] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : Tuple = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
UpperCAmelCase : str = '▁'
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
__a = BarthezTokenizer
def __init__( self : Optional[int] , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : Union[str, Any]="<s>" , UpperCamelCase : Any="</s>" , UpperCamelCase : Tuple="</s>" , UpperCamelCase : Tuple="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : int="<mask>" , **UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , )
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : Tuple = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : List[Any] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
return (out_vocab_file,)
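

# --- Added example, not part of the original module ---
# Sketch of the special-token layout built above, with made-up ids (cls=0,
# sep=2) instead of a real BARThez vocabulary:
#   single sequence: <s> A </s>             -> cls + A + sep
#   pair:            <s> A </s></s> B </s>  -> cls + A + sep + sep + B + sep
_cls, _sep = [0], [2]
_a, _b = [11, 12], [21]
assert _cls + _a + _sep + _sep + _b + _sep == [0, 11, 12, 2, 2, 21, 2]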
| style_context_codestyle: 115 | label: 1 |
"""Tokenization classes for BART (GPT-2 style byte-level Byte-Pair-Encoding)."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    the whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
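

# --- Added example, not part of the original module ---
# A quick, self-contained check of the two helpers above. bytes_to_unicode
# maps every one of the 256 byte values to a printable character (a space,
# byte 32, becomes "Ġ"), and get_pairs lists the adjacent symbol pairs that
# the BPE loop considers for merging.
_byte_encoder = bytes_to_unicode()
assert len(_byte_encoder) == 256
assert _byte_encoder[ord(" ")] == "Ġ"
assert get_pairs(tuple("low")) == {("l", "o"), ("o", "w")}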
class BartTokenizer(PreTrainedTokenizer):
    """Constructs a BART tokenizer, based on GPT-2 byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| code_codestyle: 357 |
"""Flax mT5 model: mT5 reuses the T5 modules, configured by MT5Config."""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
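

# --- Added example, not part of the original module ---
# shift_tokens_right builds decoder inputs from labels: shift right one step,
# put the decoder start token first, and replace -100 label padding with the
# pad token. With pad_token_id=0 and decoder_start_token_id=0 (illustrative):
#   shift_tokens_right(jnp.array([[15, 16, 17, -100]]), 0, 0)
#   -> Array([[ 0, 15, 16, 17]])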
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| style_context_codestyle: 174 | label: 0 |
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 267 |
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the max value in a list using divide and conquer.

    :param nums: list of elements
    :param left: index of the first element
    :param right: index of the last element

    >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
    ...     find_max(nums, 0, len(nums) - 1) == max(nums)
    True
    True
    True
    True
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| style_context_codestyle: 267 | label: 1 |
"""Tokenization classes for LED (GPT-2 style byte-level Byte-Pair-Encoding)."""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    the whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2/BART byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
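

# --- Added example, not part of the original module ---
# Sketch of the `global_attention_mask` padding rule above, inlined on plain
# lists (no tokenizer needed). Right padding extends the mask with -1, because
# 0 already means "local attention" for LED rather than "not to attend".
_input_ids = [5, 6, 7, 1, 1]            # already padded to length 5 (pad id 1, illustrative)
_global_attention_mask = [1, 0, 0]      # only covers the unpadded tokens so far
_difference = len(_input_ids) - len(_global_attention_mask)
assert _global_attention_mask + [-1] * _difference == [1, 0, 0, -1, -1]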
| code_codestyle: 66 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers apart each sparse layer sits.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers apart each sparse layer sits.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
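

# --- Added example, not part of the original module ---
# The sparse-layer spacing computed above, worked through for the defaults:
# num_layers=12 and num_sparse_encoder_layers=3 give
#   encoder_sparse_step = 12 // 3 = 4,
# i.e. every 4th encoder layer is a sparse (MoE) layer; with 0 sparse layers
# the step falls back to num_layers, so no layer ends up sparse.
#   SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3).encoder_sparse_step  # -> 4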
| style_context_codestyle: 66 | label: 1 |
# This file is autogenerated by the command `make fix-copies`, do not edit.
# Placeholder object raising an informative error when torch/scipy are missing;
# the class name below is the scheduler this dummy stands in for in diffusers.
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| code_codestyle: 63 |
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations (recursive rotation approach).

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations (backtracking with in-place swaps).

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
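
    # --- Added example, not part of the original file ---
    # Both implementations enumerate the same 3! permutations, only in a
    # different order; comparing them as sets of tuples makes that explicit.
    assert {tuple(p) for p in permute([1, 2, 3])} == {tuple(p) for p in permute2([1, 2, 3])}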
| style_context_codestyle: 240 | label: 0 |
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
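

# --- Added example, not part of the original module ---
# _match slides the regex tuple over the flattened parameter-name tuple, so a
# rule like ("mlp", "c_fc", "kernel") matches a key nested under any prefix:
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "wte", "embedding"))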
| code_codestyle: 356 |
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0

    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
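
    # --- Added example, not part of the original file ---
    # Equivalent one-liner, handy as a cross-check against the loop above:
    assert hamming_distance("karolin", "kathrin") == sum(c1 != c2 for c1, c2 in zip("karolin", "kathrin"))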
| style_context_codestyle: 191 | label: 0 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
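

# --- Added example, not part of the original module ---
# The dynamic ONNX axes depend on the task (a sketch, assuming the usual
# transformers OnnxConfig constructor of (config, task="default")):
#   BigBirdOnnxConfig(BigBirdConfig(), task="multiple-choice").inputs["input_ids"]
#   -> {0: "batch", 1: "choice", 2: "sequence"}
#   BigBirdOnnxConfig(BigBirdConfig()).inputs["input_ids"]
#   -> {0: "batch", 1: "sequence"}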
| code_codestyle: 103 |
import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| style_context_codestyle: 103 | label: 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
return "".join(sorted(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]:
return word_by_signature[signature(__UpperCAmelCase )]
__A = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__A = sorted({word.strip().lower() for word in data.splitlines()})
__A = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
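
    # --- Added example, not part of the original script ---
    # The same grouping on an inline list, so the defaultdict logic can be
    # checked without a words.txt file on disk:
    demo_groups = collections.defaultdict(list)
    for w in ["listen", "silent", "enlist", "google"]:
        demo_groups[signature(w)].append(w)
    assert demo_groups["eilnst"] == ["listen", "silent", "enlist"]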
| code_codestyle: 364 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
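# --- Illustrative sketch (editor addition, not part of the original test file). ---
# `tokenizer_integration_test_util` is a helper inherited from the shared tokenizer test
# mixin; the manual equivalent of the slow check above would be roughly (upstream class
# name assumed):
#
#     from transformers import DebertaV2Tokenizer
#
#     tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#     batch = tokenizer(["first sequence", "second sequence"], padding=True)
#     # `batch` exposes the same three keys the expected encoding above asserts on:
#     # input_ids, token_type_ids and attention_mask.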
| 2 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
UpperCamelCase__: Optional[int] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'levit'
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
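# --- Illustrative sketch (editor addition, not part of the original module; assumes the
# classes above mirror the upstream transformers `LevitConfig` / `LevitOnnxConfig`). ---
#
#     config = LevitConfig(hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12])
#     config.save_pretrained("./levit-config")                 # writes config.json
#     assert LevitConfig.from_pretrained("./levit-config").hidden_sizes == [128, 256, 384]
#
# The ONNX config declares a single rank-4 `pixel_values` input (batch, num_channels,
# height, width) and a validation tolerance of 1e-4, exactly as returned above.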
| 23 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_UpperCAmelCase : Union[str, Any] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_UpperCAmelCase : int = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the reference and hypothesis lists need to match, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_UpperCAmelCase : Any = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring (passed to sacrebleu's TER as `no_punct`). Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _snake_case (self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def _snake_case (self , __lowercase , __lowercase , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , ):
__lowerCAmelCase = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCAmelCase = [[refs[i] for refs in references] for i in range(__lowercase )]
__lowerCAmelCase = TER(
normalized=__lowercase , no_punct=__lowercase , asian_support=__lowercase , case_sensitive=__lowercase , )
__lowerCAmelCase = sb_ter.corpus_score(__lowercase , __lowercase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 174 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
def shape_list (tensor : Union[tf.Tensor, np.ndarray] ):
    """simple docstring"""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax (logits : tf.Tensor , axis : Optional[int] = None , name : Optional[str] = None ):
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def UpperCAmelCase_ (__a : List[Any] , __a : Optional[int] , __a : Tuple , __a : Any=1e-5 , __a : Union[str, Any]=-1 ):
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowercase , _lowercase ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
_a : Union[str, Any] = tf.nn.moments(_lowercase , axes=[axis] , keepdims=_lowercase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_a : str = [1] * inputs.shape.rank
_a : Tuple = shape_list(_lowercase )[axis]
_a : Optional[int] = tf.reshape(_lowercase , _lowercase )
_a : Dict = tf.reshape(_lowercase , _lowercase )
# Compute layer normalization using the batch_normalization
# function.
_a : Optional[int] = tf.nn.batch_normalization(
_lowercase , _lowercase , _lowercase , offset=_lowercase , scale=_lowercase , variance_epsilon=_lowercase , )
return outputs
def UpperCAmelCase_ (__a : Any , __a : int=0 , __a : Optional[Any]=-1 ):
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_a : int = tf.shape(_lowercase )
_a : str = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_a : Optional[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_lowercase , _lowercase )
def UpperCAmelCase_ (__a : tf.Tensor ):
"""simple docstring"""
if not isinstance(_lowercase , tf.Tensor ):
_a : Union[str, Any] = tf.convert_to_tensor(_lowercase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_a : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_a : List[str] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_a : Union[str, Any] = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def UpperCAmelCase_ (__a : tf.Tensor , __a : int , __a : str = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
_lowercase , tf.cast(_lowercase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_lowercase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def UpperCAmelCase_ (__a : int , __a : Dict , __a : str ):
"""simple docstring"""
_a : int = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_a : Any = [x for x in data if len(_lowercase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
_a : Any = np.asarray(_lowercase )
_a : List[Any] = 1
_a : Optional[int] = np.array_split(_lowercase , _lowercase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_a : int = np.array_split(_lowercase , _lowercase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_lowercase ):
_a : Dict = chunk_data
else:
_a : Union[str, Any] = data
def UpperCAmelCase_ (__a : List[Any] , __a : Optional[int] ):
"""simple docstring"""
if name in group.attrs:
_a : int = [n.decode('utf8' ) if hasattr(_lowercase , 'decode' ) else n for n in group.attrs[name]]
else:
_a : List[str] = []
_a : Dict = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(_lowercase , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
def _expand_single_ad_tensor(__a : Optional[Any] ):
if isinstance(_lowercase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_lowercase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _lowercase )
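# --- Illustrative sketch (editor addition, not part of the original module). ---
# In upstream Transformers the remaining mangled helpers above are `functional_layernorm`,
# `flatten`, `invert_attention_mask`, `check_embeddings_within_bounds`,
# `save_attributes_to_hdf5_group`, `load_attributes_from_hdf5_group` and `expand_1d`.
# `shape_list` mixes static and dynamic shapes:
#
#     import tensorflow as tf
#
#     x = tf.keras.Input(shape=(None, 768))   # batch and sequence length unknown
#     # shape_list(x) -> [<symbolic batch>, <symbolic seq>, 768]: Python ints where the
#     # static shape is known, scalar tensors where it is not.
#
# `stable_softmax` adds 1e-9 to the logits purely as an XLA-compilation workaround; it
# leaves the resulting distribution effectively unchanged.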
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats (__a : str = "https://www.worldometers.info/coronavirus" ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( state_dict ):
    '''simple docstring'''
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict, codebook_state_dict ):
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("""heads.cmd.mim_head.cls.predictions""", """mmm_image_head""" )
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""", """mmm_text_head""" )
        key = key.replace("""heads.cmd.itm_head.cls""", """itm_head""" )
        key = key.replace("""heads.cmd.itm_head.pooler""", """itm_head.pooler""" )
        key = key.replace("""heads.cmd.clip_head.logit_scale""", """flava.logit_scale""" )
        key = key.replace("""heads.fairseq_mlm.cls.predictions""", """mlm_head""" )
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""", """mim_head""" )
        key = key.replace("""mm_text_projection""", """flava.text_to_mm_projection""" )
        key = key.replace("""mm_image_projection""", """flava.image_to_mm_projection""" )
        key = key.replace("""image_encoder.module""", """flava.image_model""" )
        key = key.replace("""text_encoder.module""", """flava.text_model""" )
        key = key.replace("""mm_encoder.module.encoder.cls_token""", """flava.multimodal_model.cls_token""" )
        key = key.replace("""mm_encoder.module""", """flava.multimodal_model""" )
        key = key.replace("""text_projection""", """flava.text_projection""" )
        key = key.replace("""image_projection""", """flava.image_projection""" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"""image_codebook.{key}"""] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint ( checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path, map_location="""cpu""" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="""cpu""" )
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__a = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
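# --- Illustrative invocation (editor addition; the file name and all paths are hypothetical). ---
# With the arguments registered above, a conversion run looks like:
#
#     python convert_flava_checkpoint.py \
#         --checkpoint_path ./flava_full.pt \
#         --codebook_path ./flava_codebook.pt \
#         --pytorch_dump_folder_path ./flava-hf
#
# `--config_path` is optional: when omitted, `convert_flava_checkpoint` falls back to a
# default `FlavaConfig()`, as the function above shows.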
| 66 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( _lowercase ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :Tuple = False
elif args.student_type == "gpt2":
snake_case_ :Union[str, Any] = False
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :List[str] = False
def A_ ( ):
'''simple docstring'''
snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", )
parser.add_argument(
"""--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", )
parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" )
parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", )
parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", )
parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", )
parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", )
parser.add_argument(
"""--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", )
parser.add_argument(
"""--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""", )
parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", )
parser.add_argument(
"""--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", )
parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", )
parser.add_argument(
"""--fp16_opt_level""", type=_lowercase, default="""O1""", help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
), )
parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" )
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" )
snake_case_ :Tuple = parser.parse_args()
sanity_checks(_lowercase )
# ARGS #
init_gpu_params(_lowercase )
set_seed(_lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f:
json.dump(vars(_lowercase ), _lowercase, indent=4 )
git_log(args.dump_path )
snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type]
snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case_ :Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase )
snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case_ :str = special_tok_ids
snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file, """rb""" ) as fp:
snake_case_ :str = pickle.load(_lowercase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts, """rb""" ) as fp:
snake_case_ :Optional[Any] = pickle.load(_lowercase )
snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case_ :Optional[int] = 0.0 # do not predict special tokens
snake_case_ :int = torch.from_numpy(_lowercase )
else:
snake_case_ :List[str] = None
snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config )
snake_case_ :Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase )
else:
snake_case_ :Optional[int] = student_model_class(_lowercase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowercase, _lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowercase, _lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case_ :Optional[int] = Distiller(
params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
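# --- Illustrative invocation (editor addition; the file name and all paths are hypothetical). ---
# A minimal DistilBERT-from-BERT distillation run with the flags defined above:
#
#     python train.py \
#         --student_type distilbert --student_config ./distilbert.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#         --data_file ./binarized_data.pickle --token_counts ./token_counts.pickle \
#         --dump_path ./serialization_dir --force
#
# The sanity checks at the top enforce this combination: with --mlm, alpha_mlm must be
# positive and alpha_clm zero, and the (student, teacher) pair must be compatible.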
| 66 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( ProcessorMixin ):
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__( self , feature_extractor , tokenizer):
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio" , None)
        sampling_rate = kwargs.pop("sampling_rate" , None)
        text = kwargs.pop("text" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    @contextmanager
    def as_target_processor( self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
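# --- Illustrative sketch (editor addition, not part of the original module; assumes the
# class above is the upstream `Speech2TextProcessor`, and `waveform` is a placeholder
# for a 1-D float array). ---
#
#     from transformers import Speech2TextProcessor
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#     # `inputs` holds the feature-extractor outputs plus a "labels" key copied from the
#     # tokenizer's input_ids, exactly as __call__ above wires it. The deprecated
#     # `as_target_processor` context manager is the legacy route to the same labels.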
| 303 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig( PretrainedConfig ):
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig( OnnxConfig ):
    @property
    def inputs( self) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
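# --- Illustrative sketch (editor addition, not part of the original module). ---
#
#     config = XLMRobertaConfig(num_hidden_layers=6)   # other fields keep the defaults above
#     onnx_config = XLMRobertaOnnxConfig(config, task="multiple-choice")
#     # onnx_config.inputs now maps input_ids / attention_mask to
#     # {0: "batch", 1: "choice", 2: "sequence"}; any other task yields rank-2
#     # (batch, sequence) axes, as the `inputs` property above encodes.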
| 303 | 1 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one ( i ) -> int: # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def _SCREAMING_SNAKE_CASE ( num_proc ) -> List[str]:
    sa = [1, 2]
    sb = {'''a''': 1, '''b''': 2}
    sc = {'''a''': [1, 2], '''b''': [3, 4]}
    sd = {'''a''': {'''1''': 1}, '''b''': 2}
    se = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {'''a''': 2, '''b''': 3}
    expected_map_nested_sc = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_sd = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_se = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one , sa , num_proc=num_proc ) == expected_map_nested_sa
        assert map_nested(add_one , sb , num_proc=num_proc ) == expected_map_nested_sb
        assert map_nested(add_one , sc , num_proc=num_proc ) == expected_map_nested_sc
        assert map_nested(add_one , sd , num_proc=num_proc ) == expected_map_nested_sd
        assert map_nested(add_one , se , num_proc=num_proc ) == expected_map_nested_se
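# --- Illustrative sketch (editor addition, not part of the original test file). ---
# Outside the test harness, the same Spark-backed parallelism looks like this
# (the "spark" backend requires the joblib-spark package, hence the decorators above):
#
#     from datasets.parallel import parallel_backend
#     from datasets.utils.py_utils import map_nested
#
#     with parallel_backend("spark"):
#         doubled = map_nested(lambda x: 2 * x, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # doubled == {"a": [2, 4], "b": [6, 8]}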
| 247 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None
class _SCREAMING_SNAKE_CASE( A , A ):
SCREAMING_SNAKE_CASE_ : Any = 2
@register_to_config
def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = 1_00 ,SCREAMING_SNAKE_CASE__ = 1.0_0_7 ,SCREAMING_SNAKE_CASE__ = 80 ,SCREAMING_SNAKE_CASE__ = 0.0_5 ,SCREAMING_SNAKE_CASE__ = 50 ,) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = sigma_max
# setable values
__SCREAMING_SNAKE_CASE :int = None
__SCREAMING_SNAKE_CASE :np.IntTensor = None
__SCREAMING_SNAKE_CASE :torch.FloatTensor = None # sigma(t_i)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = num_inference_steps
__SCREAMING_SNAKE_CASE :int = np.arange(0 ,self.num_inference_steps )[::-1].copy()
__SCREAMING_SNAKE_CASE :Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__SCREAMING_SNAKE_CASE :List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ ,dtype=torch.floataa ,device=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> Tuple[torch.FloatTensor, float]:
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
__SCREAMING_SNAKE_CASE :List[str] = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
__SCREAMING_SNAKE_CASE :Optional[int] = self.config.s_noise * randn_tensor(sample.shape ,generator=SCREAMING_SNAKE_CASE__ ).to(sample.device )
__SCREAMING_SNAKE_CASE :List[str] = sigma + gamma * sigma
__SCREAMING_SNAKE_CASE :str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = True ,) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = sample_hat + sigma_hat * model_output
__SCREAMING_SNAKE_CASE :Tuple = (sample_hat - pred_original_sample) / sigma_hat
__SCREAMING_SNAKE_CASE :List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE__ ,derivative=SCREAMING_SNAKE_CASE__ ,pred_original_sample=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = True ,) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = sample_prev + sigma_prev * model_output
__SCREAMING_SNAKE_CASE :List[Any] = (sample_prev - pred_original_sample) / sigma_prev
__SCREAMING_SNAKE_CASE :Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE__ ,derivative=SCREAMING_SNAKE_CASE__ ,pred_original_sample=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError()
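# --- Illustrative sketch (editor addition, not part of the original module). ---
# In upstream diffusers the mangled method names above include `set_timesteps`,
# `add_noise_to_input`, `step` and `step_correct`; under that assumption, and with
# `model`, `sample`, `generator` and the loop index `i` as placeholders, one
# second-order denoising step looks like:
#
#     scheduler.set_timesteps(num_inference_steps=50, device="cuda")
#     sigma, sigma_prev = scheduler.schedule[i], scheduler.schedule[i + 1]
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#     model_output = model(sample_hat, sigma_hat).sample
#     out = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#     model_output = model(out.prev_sample, sigma_prev).sample
#     out = scheduler.step_correct(model_output, sigma_hat, sigma_prev,
#                                  sample_hat, out.prev_sample, out.derivative)
#     sample = out.prev_sample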
| 191 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def __UpperCamelCase ( *A_ , **A_ ) -> Any:
"""simple docstring"""
pass
def hashimage ( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
__lowercase : Any = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = DepthEstimationPipeline(model=A_ , image_processor=A_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __UpperCamelCase ( self , A_ , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , A_ )
import datasets
UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
UpperCamelCase = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , A_ , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 'Intel/dpt-large'
UpperCamelCase = pipeline('depth-estimation' , model=A_ )
UpperCamelCase = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
UpperCamelCase = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
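# --- Illustrative sketch (editor addition, not part of the original test file). ---
# The slow test above exercises the public API end to end; the same call outside the
# test harness:
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     out["depth"]            # PIL.Image visualisation of the depth map
#     out["predicted_depth"]  # raw torch.Tensor the assertions above inspect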
| 110 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
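# --- Illustrative sketch (editor addition, not part of the original module). ---
# With the freshly initialised zero-mean / unit-std parameters, `scale` and `unscale`
# are identity maps and exact inverses of each other; real checkpoints load learned
# statistics via `from_pretrained`:
#
#     normalizer = lowercase(embedding_dim=768)
#     embeds = torch.randn(2, 768)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)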
| 110 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
def __init__( self: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[Any]=7 , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: List[str]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: str=True , UpperCamelCase__: Dict=99 , UpperCamelCase__: Optional[Any]=32 , UpperCamelCase__: Dict=5 , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: str=2 , UpperCamelCase__: Any=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Union[str, Any]=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Tuple = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = max_position_embeddings
lowerCamelCase__ : str = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[str] = num_choices
lowerCamelCase__ : Tuple = scope
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : List[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : str = None
if self.use_token_type_ids:
lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: str ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 41 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration class for a CvT (Convolutional vision Transformer) model."""

    model_type = "cvt"
    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
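
# A minimal usage sketch of the config defined above: build a default instance and
# override one field. Output values follow from the defaults in __init__.
if __name__ == "__main__":
    config = CvtConfig(depth=[1, 2, 4])
    print(config.model_type, config.embed_dim)  # -> cvt [64, 192, 384]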
| 2 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 365 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
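
# A minimal usage sketch (the CLIP checkpoint name is an assumption; any CLIP
# tokenizer checkpoint works): map one placeholder to four trainable tokens.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    print(tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True))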
| 345 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor with a ConvNeXT-style resize: crop-percentage trick below 384 pixels, plain resize at 384 and above."""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
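
# A minimal sketch: preprocess one random RGB image with the defaults above
# (shortest_edge=384 takes the plain-resize branch, so no crop is applied).
if __name__ == "__main__":
    dummy_image = (np.random.rand(256, 320, 3) * 255).astype("uint8")
    processor = ConvNextImageProcessor()
    batch = processor.preprocess(dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)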
| 79 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 5 | 0 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError or ValueError if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Version of manhattan_distance written as a single comprehension.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
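
# extra usage sketch beyond the doctests (a second __main__ guard is valid Python)
if __name__ == "__main__":
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([0, 0, 0], [1, 2, 3]) == 6.0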
| 350 |
'''simple docstring'''
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that `device_map` assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Split `n_layers` layer indices into contiguous chunks, one chunk per device."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
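
# A quick sketch of what get_device_map produces for a 12-layer model on two GPUs:
if __name__ == "__main__":
    device_map = get_device_map(12, devices=[0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, 12)  # passes silently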
| 219 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
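
# A minimal usage sketch: the checkpoint paths are the defaults assumed above and
# must exist locally for this to run.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # reads ./model_checkpoints/vqgan_only.{yaml,pt}
    x = torch.randn(1, 3, 256, 256, device=device)
    x_rec = reconstruct_with_vqgan(x, vqgan)
    print(x_rec.shape)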
| 303 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a 1D UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
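
# A minimal usage sketch (the "harmonai/maestro-150k" checkpoint name is an
# assumption; any compatible 1D-UNet audio checkpoint works):
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    out = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
    print(out.audios.shape)  # (1, channels, samples)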
| 303 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Return True if the chain starting at `number` arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` produce chains that arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
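
# Minimal sanity checks for the helpers above (the full solution() below is expensive):
assert next_number(44) == 32
assert chain(10) is True  # 10 -> 1
assert chain(58) is False  # 58 -> 89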
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 252 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 252 | 1 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 110 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory function used to instantiate the ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the `datasets-cli` argument parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
lowercase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__ = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
lowercase__ = []
lowercase__ = []
lowercase__ = {}
if os.path.isdir(self._tfds_path ):
lowercase__ = os.listdir(UpperCamelCase_ )
else:
lowercase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if not os.path.isfile(UpperCamelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = False
lowercase__ = False
lowercase__ = []
for line in lines:
lowercase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__ = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__ = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__ = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__ = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__ = True
lowercase__ = list(filter(lambda UpperCamelCase_ : e in out_line , UpperCamelCase_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCamelCase_ ) + '''\n''' )
out_lines.append(UpperCamelCase_ )
out_lines.append(UpperCamelCase_ )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__ = re.sub(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__ = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , UpperCamelCase_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__ = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__ = True
out_lines.append(UpperCamelCase_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace('''.py''' , '''''' )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(UpperCamelCase_ )
if needs_manual_update:
with_manual_update.append(UpperCamelCase_ )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(UpperCamelCase_ )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
lowercase__ = os.path.basename(UpperCamelCase_ )
lowercase__ = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(UpperCamelCase_ , UpperCamelCase_ )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
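
# Example invocation (a sketch; assumes the `datasets-cli` entry point shipped with
# this package is on PATH):
#
#     datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py --datasets_directory ./hf_datasets
#
# Builder files are rewritten into one folder per dataset; files flagged with the
# highlight markers above still need a manual pass.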
| 110 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
a =torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
a =torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
a =torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
a =torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
a =torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
a =torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
a =torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
a =torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
a =torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
a =torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
a =torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
a =torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
a =torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
a =torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
a =torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 113 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
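

# A tiny, hypothetical sanity check of the hardcoded maps above (helper not in the
# original script; values read directly off LAYERS_TO_COPY / LAYERS_TO_SUPERVISE):
def _demo_layer_maps() -> None:
    assert pick_layers_to_copy(n_student=4, n_teacher=12) == [0, 4, 8, 11]
    assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]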
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = "student" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase : int = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"teacher must be a model or string got type {type(lowerCamelCase__ )}"
__lowerCamelCase : str = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase : Dict = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase : Optional[int] = teacher_e
if d is None:
__lowerCamelCase : Optional[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__lowerCamelCase , __lowerCamelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase : Any = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase : Union[str, Any] = teacher_e
if d is None:
__lowerCamelCase : Any = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
__lowerCamelCase : str = teacher.config_class(**lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__lowerCamelCase : Tuple = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase : Optional[Any] = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
__lowerCamelCase : Dict = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
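# Hypothetical CLI invocation via fire (a sketch; in the un-mangled original
# the keyword arguments are teacher, save_path, e and d):
#   python make_student.py facebook/bart-large-cnn --save_path student_dir --e 12 --d 3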
| 113 | 1 |
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase( __UpperCamelCase : np.ndarray ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : bool ,):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = grid.shape
lowerCAmelCase_ : int = [-1, 1, 0, 0]
lowerCAmelCase_ : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = [(0, source)], set()
lowerCAmelCase_ : int = np.full((rows, cols) ,np.inf )
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Tuple = np.empty((rows, cols) ,dtype=__UpperCamelCase )
lowerCAmelCase_ : Any = None
while queue:
((lowerCAmelCase_) , (lowerCAmelCase_)) : Dict = heappop(__UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCAmelCase_ : Any = []
while (x, y) != source:
path.append((x, y) )
lowerCAmelCase_ , lowerCAmelCase_ : Any = predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__UpperCamelCase ) ):
lowerCAmelCase_ , lowerCAmelCase_ : str = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCAmelCase_ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__UpperCamelCase ,(dist + 1, (nx, ny)) )
lowerCAmelCase_ : Any = dist + 1
lowerCAmelCase_ : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
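# Intended usage sketch (cells equal to 1 are walkable per the `next_node == 1`
# check; several assignment targets above were name-mangled, so this reflects
# the un-mangled original, which returns (distance, path) or (np.inf, []) when
# the destination is unreachable):
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dist, path = UpperCamelCase(grid, (0, 0), (2, 0), False)  # roughly (6.0, [(0, 0), ..., (2, 0)])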
| 103 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = "ernie_m"
A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout
UpperCAmelCase_ : str = is_decoder
UpperCAmelCase_ : List[str] = act_dropout
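# Minimal instantiation sketch; in the un-mangled original (ErnieMConfig) this
# is roughly:
#   config = ErnieMConfig(vocab_size=250002, hidden_size=768, num_hidden_layers=12)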
| 345 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCamelCase = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : int = SavedModel()
lowerCAmelCase__ : int = []
with open(os.path.join(_a , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
lowerCAmelCase__ : Optional[int] = json.load(_a )['''opsets''']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_a )] )
with open(_a , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
lowerCAmelCase__ : Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowerCAmelCase__ : List[Any] = sorted(_a )
lowerCAmelCase__ : Tuple = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_a )
if strict and len(_a ) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops) )
elif len(_a ) > 0:
print(f'Found the following incompatible ops for the opset {opset}:' )
print(*_a , sep='''\n''' )
else:
print(f'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
lowerCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
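# Example invocation (paths hypothetical):
#   python utils/check_tf_ops.py --saved_model_path saved_model/my_model --opset 12 --strict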
| 211 |
import math
class _a :
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : Any=0 )-> Optional[Any]: # a graph with Node 0,1,...,N-1
lowerCAmelCase__ : Optional[int] = n
lowerCAmelCase__ : List[Any] = [
[math.inf for j in range(0 , _SCREAMING_SNAKE_CASE )] for i in range(0 , _SCREAMING_SNAKE_CASE )
] # adjacency matrix for weight
lowerCAmelCase__ : str = [
[math.inf for j in range(0 , _SCREAMING_SNAKE_CASE )] for i in range(0 , _SCREAMING_SNAKE_CASE )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str )-> List[str]:
lowerCAmelCase__ : Optional[int] = w
def UpperCAmelCase__( self : List[Any] )-> Optional[int]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowerCAmelCase__ : Dict = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str )-> str:
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
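# For reference: traced by hand over the edge list above, show_min(1, 4) is 11
# (via 1 -> 3 -> 4) and show_min(0, 3) is 16 (via 0 -> 2 -> 3). Note that
# show_min returns the distance rather than printing it, so wrap the calls in
# print(...) to actually display the values.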
| 211 | 1 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case_ : Any = 16
snake_case_ : List[str] = 32
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = 1_6 ):
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
_UpperCamelCase : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCamelCase : List[str] = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase : str = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCamelCase : Tuple = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCamelCase : int = 1_6
elif accelerator.mixed_precision != "no":
_UpperCamelCase : str = 8
else:
_UpperCamelCase : str = None
return tokenizer.pad(
UpperCAmelCase_ , padding='longest' , max_length=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_tensors='pt' , )
# Instantiate dataloaders.
_UpperCamelCase : List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , drop_last=UpperCAmelCase_ )
_UpperCamelCase : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
# Initialize accelerator
_UpperCamelCase : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase : Optional[int] = config['lr']
_UpperCamelCase : Optional[int] = int(config['num_epochs'] )
_UpperCamelCase : Tuple = int(config['seed'] )
_UpperCamelCase : int = int(config['batch_size'] )
_UpperCamelCase : Tuple = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_UpperCamelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCamelCase : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
_UpperCamelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[int] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase : Any = AdamW(params=model.parameters() , lr=UpperCAmelCase_ )
# Instantiate scheduler
_UpperCamelCase : str = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Now we train the model
for epoch in range(UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCamelCase : Optional[Any] = model(**UpperCAmelCase_ )
_UpperCamelCase : Tuple = outputs.loss
_UpperCamelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase : int = model(**UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
_UpperCamelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , UpperCAmelCase_ )
def A__ ( ):
_UpperCamelCase : int = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_UpperCamelCase : int = parser.parse_args()
_UpperCamelCase : List[str] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
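# Typical launches (a sketch; the filename is whatever this script is saved as):
#   accelerate launch nlp_example.py --mixed_precision fp16
#   python nlp_example.py --cpu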
| 83 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = ComputeEnvironment.AMAZON_SAGEMAKER
lowerCAmelCase_ = True
lowerCAmelCase_ = "ml.p3.2xlarge"
lowerCAmelCase_ = "accelerate_sagemaker_execution_role"
lowerCAmelCase_ = "hf-sm"
lowerCAmelCase_ = "us-east-1"
lowerCAmelCase_ = 1
lowerCAmelCase_ = "accelerate-sagemaker-1"
lowerCAmelCase_ = "1.6"
lowerCAmelCase_ = "4.4"
lowerCAmelCase_ = "train.py"
lowerCAmelCase_ = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
lowerCAmelCase_ = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class __snake_case ( unittest.TestCase ):
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , _lowercase )
assert isinstance(converted_args["""do_train"""] , _lowercase )
assert isinstance(converted_args["""epochs"""] , _lowercase )
assert isinstance(converted_args["""learning_rate"""] , _lowercase )
assert isinstance(converted_args["""max_steps"""] , _lowercase )
with pytest.raises(_lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
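# Expected behaviour, inferred from the asserts above (a sketch, not the
# documented API): _convert_nargs_to_dict turns ["--epochs", "3", "--do_train",
# "False"] into {"epochs": 3, "do_train": False} with types inferred from the
# values, while the ambiguous bare flags in fail_training_script_args raise.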
| 219 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_A = Lock()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowerCAmelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ : Tuple = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ : Optional[int] = min(lowerCAmelCase , lowerCAmelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowerCAmelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ : List[str] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ : Optional[Any] = max(lowerCAmelCase , lowerCAmelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowerCAmelCase )
def a__ ( lowerCAmelCase ) -> Any:
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : str = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ : Dict = Pipe()
UpperCAmelCase__ : Any = Pipe()
process_array_.append(
Process(
target=lowerCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ : Optional[Any] = temp_rs
UpperCAmelCase__ : int = temp_rr
for i in range(1 , len(lowerCAmelCase ) - 1 ):
UpperCAmelCase__ : Union[str, Any] = Pipe()
UpperCAmelCase__ : Optional[int] = Pipe()
process_array_.append(
Process(
target=lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ : Optional[int] = temp_rs
UpperCAmelCase__ : Any = temp_rr
process_array_.append(
Process(
target=lowerCAmelCase , args=(
len(lowerCAmelCase ) - 1,
arr[len(lowerCAmelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCAmelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowerCAmelCase ) ):
UpperCAmelCase__ : Any = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Tuple = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*lowerCAmelCase )
UpperCAmelCase__ : int = odd_even_transposition(lowerCAmelCase )
print("""Sorted List\n""" )
print(*lowerCAmelCase )
if __name__ == "__main__":
main()
| 166 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : Tuple = pad_token_id
UpperCAmelCase__ : Any = max_length
UpperCAmelCase__ : str = vocab
UpperCAmelCase__ : Union[str, Any] = merges
UpperCAmelCase__ : Tuple = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
UpperCAmelCase__ : Tuple = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase ):
"""simple docstring"""
return cls(**_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.tf_tokenizer(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
UpperCAmelCase__ : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
UpperCAmelCase__ , UpperCAmelCase__ : str = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 166 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( lowerCamelCase__ : str = "" ):
'''simple docstring'''
lowerCamelCase = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
lowerCamelCase = BeautifulSoup(requests.get(lowerCamelCase__ ).text , """html.parser""" )
lowerCamelCase = soup.find_all("""td""" , attrs="""titleColumn""" )
lowerCamelCase = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCamelCase__ , lowerCamelCase__ )
}
def __lowerCamelCase ( lowerCamelCase__ : str = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
lowerCamelCase = get_imdb_top_aaa_movies()
with open(lowerCamelCase__ , """w""" , newline="""""" ) as out_file:
lowerCamelCase = csv.writer(lowerCamelCase__ )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 252 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 1 |
"""simple docstring"""
import string
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = ''
for i in sequence:
__lowerCAmelCase = ord(lowerCAmelCase_ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def a_ ( lowerCAmelCase_ : Any ):
__lowerCAmelCase = string.ascii_letters
__lowerCAmelCase = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase_ )] if c in letters else c for c in sequence )
def a_ ( ):
from timeit import timeit
print('Running performance benchmarks...' )
__lowerCAmelCase = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(F"""> atbash_slow(): {timeit("atbash_slow(printable)", setup=lowerCAmelCase_ )} seconds""" )
print(F"""> atbash(): {timeit("atbash(printable)", setup=lowerCAmelCase_ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 369 |
_snake_case : List[str] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
_snake_case : List[Any] = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
__lowerCAmelCase = from_type.lower().strip('s' )
__lowerCAmelCase = to_type.lower().strip('s' )
__lowerCAmelCase = UNIT_SYMBOL.get(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = UNIT_SYMBOL.get(lowerCAmelCase_, lowerCAmelCase_ )
if from_sanitized not in METRIC_CONVERSION:
__lowerCAmelCase = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase_ )}"""
)
raise ValueError(lowerCAmelCase_ )
if to_sanitized not in METRIC_CONVERSION:
__lowerCAmelCase = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(lowerCAmelCase_ )}"""
)
raise ValueError(lowerCAmelCase_ )
__lowerCAmelCase = METRIC_CONVERSION[from_sanitized]
__lowerCAmelCase = METRIC_CONVERSION[to_sanitized]
__lowerCAmelCase = 1
if from_exponent > to_exponent:
__lowerCAmelCase = from_exponent - to_exponent
else:
__lowerCAmelCase = -(to_exponent - from_exponent)
return value * pow(10, lowerCAmelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
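# Worked examples against the exponent table above (note the two dicts were
# assigned to mangled names, so the UNIT_SYMBOL / METRIC_CONVERSION lookups in
# the function assume the un-mangled original):
#   a_(4, "kilometer", "meter")  -> 4000.0  # 4 * 10**(3 - 0)
#   a_(1, "meter", "kilometer")  -> 0.001   # 1 * 10**(0 - 3)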
| 207 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase (SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCamelCase = logging.getLogger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
if metric == "rouge2":
SCREAMING_SNAKE_CASE = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
SCREAMING_SNAKE_CASE = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
SCREAMING_SNAKE_CASE = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'
' function.' )
SCREAMING_SNAKE_CASE = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase (SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
return EarlyStopping(
monitor=F'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , )
class lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
SCREAMING_SNAKE_CASE = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase__ )
@rank_zero_only
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
SCREAMING_SNAKE_CASE = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
SCREAMING_SNAKE_CASE = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE = od / 'test_results.txt'
SCREAMING_SNAKE_CASE = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
SCREAMING_SNAKE_CASE = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=lowerCAmelCase__ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase__ )
with open(lowerCAmelCase__ , 'a+' ) as writer:
for key in sorted(lowerCAmelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE = metrics[key]
if isinstance(lowerCAmelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE = val.item()
SCREAMING_SNAKE_CASE = F'{key}: {val:.6f}\n'
writer.write(lowerCAmelCase__ )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(lowerCAmelCase__ )
@rank_zero_only
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
try:
SCREAMING_SNAKE_CASE = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE = count_trainable_parameters(lowerCAmelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase__ , lowerCAmelCase__ , 'test' )
@rank_zero_only
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 113 |
"""simple docstring"""
from __future__ import annotations
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> list[int]:
return [ord(SCREAMING_SNAKE_CASE_ ) - 96 for elem in plain]
def lowercase (SCREAMING_SNAKE_CASE_ : list[int] ) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def lowercase () -> None:
SCREAMING_SNAKE_CASE = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , SCREAMING_SNAKE_CASE_ )
print('Decoded:' , decode(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
main()
| 113 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : List[Any] ):
lowercase__ : Tuple = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
lowercase__ : str = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(SCREAMING_SNAKE_CASE )
from datasets import load_dataset
lowercase__ : str = load_dataset("nielsr/rvlcdip-demo" )
lowercase__ : List[Any] = dataset["train"][0]["image"].convert("RGB" )
lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = outputs.logits
lowercase__ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Dict = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=SCREAMING_SNAKE_CASE , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 121 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowercase__ : Dict = AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Optional[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowercase__ : Optional[Any] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowercase__ : int = shift_tokens_right(SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id )
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE , decoder_input_ids=SCREAMING_SNAKE_CASE ).logits
lowercase__ : Dict = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE , onehot(SCREAMING_SNAKE_CASE , logits.shape[-1] ) ).mean()
lowercase__ : Union[str, Any] = -(labels.shape[-1] * loss.item())
lowercase__ : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 121 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
lowercase_ = {"bert_for_seq_generation": 512}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[int] = []
__lowerCamelCase : str = ['input_ids', 'attention_mask']
def __init__(self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A="<::::>" , A = None , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> List[str]:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> Dict:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(A )
def a__ (self , A ) -> str:
"""simple docstring"""
_a = self.sp_model.IdToPiece(A )
return token
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = []
_a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
_a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 211 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase_ = datasets.utils.logging.get_logger(__name__)
class __A ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
__lowerCamelCase : bool = None
__lowerCamelCase : bool = None
class __A ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
__lowerCamelCase : int = datasets.Audio()
__lowerCamelCase : str = 'audio'
__lowerCamelCase : Optional[Any] = AudioFolderConfig
__lowerCamelCase : List[str] # definition at the bottom of the script
__lowerCamelCase : Union[str, Any] = AudioClassification(audio_column='audio' , label_column='label' )
lowercase_ = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
lowercase_ = AUDIO_EXTENSIONS
| 211 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowercase : List[Any] = 10
def UpperCAmelCase_ (_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
for i in range(_lowerCAmelCase , _lowerCAmelCase ):
if array[i] == target:
return i
return -1
def UpperCAmelCase_ (_lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
__UpperCamelCase : List[Any] = 0
__UpperCamelCase : Union[str, Any] = len(_lowerCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = (left + right) // 3 + 1
__UpperCamelCase : int = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__UpperCamelCase : Any = one_third - 1
elif array[two_third] < target:
__UpperCamelCase : List[Any] = two_third + 1
else:
__UpperCamelCase : Any = one_third + 1
__UpperCamelCase : Any = two_third - 1
else:
return -1
def UpperCAmelCase_ (_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
if left < right:
if right - left < precision:
return lin_search(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : List[Any] = (left + right) // 3 + 1
__UpperCamelCase : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCAmelCase , one_third - 1 , _lowerCAmelCase , _lowerCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCAmelCase , _lowerCAmelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Optional[int] = input("Enter numbers separated by comma:\n").strip()
lowercase : Union[str, Any] = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowercase : List[str] = int(input("Enter the number to be found in the list:\n").strip())
lowercase : Optional[Any] = ite_ternary_search(collection, target)
lowercase : Any = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"""Iterative search: {target} found at positions: {resulta}""")
print(F"""Recursive search: {target} found at positions: {resulta}""")
else:
print("Not found")
| 171 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase_ (_lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
# Construct model
if openai_config_file == "":
__UpperCamelCase : Any = OpenAIGPTConfig()
else:
__UpperCamelCase : Union[str, Any] = OpenAIGPTConfig.from_json_file(_lowerCAmelCase )
__UpperCamelCase : int = OpenAIGPTModel(_lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
__UpperCamelCase : List[str] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__UpperCamelCase : int = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , _lowerCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
lowercase : Tuple = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
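# Example invocation (paths hypothetical, script name approximate):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pt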
| 171 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
__lowercase =BlipImageProcessor()
__lowercase =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
__lowercase =BlipaProcessor(_lowerCAmelCase , _lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : Union[str, Any] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase).tokenizer
def __lowerCamelCase ( self : Optional[Any] , **_lowerCAmelCase : Optional[int]):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase).image_processor
def __lowerCamelCase ( self : str):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__lowercase =[Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__lowercase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__lowercase =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0)
__lowercase =BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowerCAmelCase)
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =self.prepare_image_inputs()
__lowercase =image_processor(_lowerCAmelCase , return_tensors='np')
__lowercase =processor(images=_lowerCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =processor(text=_lowerCAmelCase)
__lowercase =tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase):
processor()
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase =processor.batch_decode(_lowerCAmelCase)
__lowercase =tokenizer.batch_decode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 166 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """umt5"""
lowerCAmelCase__ = ["""past_key_values"""]
def __init__( self : Optional[int] , _lowerCAmelCase : int=2_5_0_1_1_2 , _lowerCAmelCase : Union[str, Any]=5_1_2 , _lowerCAmelCase : List[Any]=6_4 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : Union[str, Any]=8 , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : str=3_2 , _lowerCAmelCase : List[str]=1_2_8 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=1e-6 , _lowerCAmelCase : List[Any]=1.0 , _lowerCAmelCase : Union[str, Any]="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Tuple="T5Tokenizer" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Any=0 , **_lowerCAmelCase : int , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase =vocab_size
__lowercase =d_model
__lowercase =d_kv
__lowercase =d_ff
__lowercase =num_layers
__lowercase =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase =num_heads
__lowercase =relative_attention_num_buckets
__lowercase =relative_attention_max_distance
__lowercase =dropout_rate
__lowercase =layer_norm_epsilon
__lowercase =initializer_factor
__lowercase =feed_forward_proj
__lowercase =use_cache
__lowercase =self.feed_forward_proj.split('-')
__lowercase =act_info[-1]
__lowercase =act_info[0] == 'gated'
if len(_lowerCAmelCase) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
__lowercase ='gelu_new'
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return self.d_model
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self.num_heads
@property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return self.num_layers
class _UpperCamelCase ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase ={
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase ='past_encoder_sequence + sequence'
__lowercase ={0: 'batch'}
__lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return 1_3
@property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return 5e-4
| 166 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a :List[Any] = logging.get_logger(__name__)
__a :Optional[Any] = '▁'
__a :int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__a :Any = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__a :Union[str, Any] = {'vinai/bartpho-syllable': 1024}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : str = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[Any]="</s>" , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : int="<unk>" , UpperCAmelCase : Optional[Any]="<pad>" , UpperCAmelCase : Tuple="<mask>" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
A_ = vocab_file
A_ = monolingual_vocab_file
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A_ = {}
A_ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
A_ = cnt
cnt += 1
with open(UpperCAmelCase , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
A_ = line.strip().split()[0]
A_ = len(self.fairseq_tokens_to_ids )
if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
A_ = len(self.fairseq_tokens_to_ids )
A_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ):
A_ = self.__dict__.copy()
A_ = None
A_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , UpperCAmelCase : str ):
A_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A_ = {}
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def __A ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self : List[Any] ):
return len(self.fairseq_ids_to_tokens )
def __A ( self : Tuple ):
A_ = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : Dict , UpperCAmelCase : str ):
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self : str , UpperCAmelCase : Any ):
return self.fairseq_ids_to_tokens[index]
def __A ( self : Any , UpperCAmelCase : Any ):
A_ = "".join(UpperCAmelCase ).replace(UpperCAmelCase , " " ).strip()
return out_string
def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , "wb" ) as fi:
A_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
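A minimal usage sketch for the tokenizer above, assuming the public BartPho checkpoint named in the pretrained maps earlier in this file:
from transformers import AutoTokenizer

# Loads the shared sentencepiece model plus the reduced monolingual vocab (dict.txt).
tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
encoding = tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoding["input_ids"])  # <s> ... </s>, mapped through the fairseq-style vocab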
| 329 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
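A usage sketch for the fast tokenizer above, with the checkpoint name taken from the pretrained map at the top of this file:
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
pair = tokenizer("first sentence", "second sentence")
print(pair["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(pair["token_type_ids"])  # 0s for the first segment, 1s for the second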
| 329 | 1 |
"""simple docstring"""
import math
def res(x: int, y: int) -> float:
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('''This should never happen''')


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = """Enter the base and the power separated by a comma: """
    x1, y1 = map(int, input(prompt).split(""","""))
    x2, y2 = map(int, input(prompt).split(""","""))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("""Largest number is""", x1, """^""", y1)
    elif res2 > res1:
        print("""Largest number is""", x2, """^""", y2)
    else:
        print("""Both are equal""")
| 102 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
A__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(
A__ ,r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" ,)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Optional[int], lowerCamelCase : GenericTensor ):
'''simple docstring'''
if self.framework == "tf":
lowercase__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=lowerCamelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowercase__ ( self : List[str], lowerCamelCase : GenericTensor ):
'''simple docstring'''
lowercase__ = self.get_masked_index(lowerCamelCase )
lowercase__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''', self.model.base_model_prefix, F"""No mask_token ({self.tokenizer.mask_token}) found on the input""", )
def lowercase__ ( self : Optional[Any], lowerCamelCase : GenericTensor ):
'''simple docstring'''
if isinstance(lowerCamelCase, lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int]=None, **lowerCamelCase : Dict ):
'''simple docstring'''
if return_tensors is None:
lowercase__ = self.framework
lowercase__ = self.tokenizer(lowerCamelCase, return_tensors=lowerCamelCase )
self.ensure_exactly_one_mask_token(lowerCamelCase )
return model_inputs
def lowercase__ ( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = self.model(**lowerCamelCase )
lowercase__ = model_inputs['''input_ids''']
return model_outputs
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Tuple=5, lowerCamelCase : List[Any]=None ):
'''simple docstring'''
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase__ = target_ids.shape[0]
lowercase__ = model_outputs['''input_ids'''][0]
lowercase__ = model_outputs['''logits''']
if self.framework == "tf":
lowercase__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowercase__ = outputs.numpy()
lowercase__ = outputs[0, masked_index, :]
lowercase__ = stable_softmax(lowerCamelCase, axis=-1 )
if target_ids is not None:
lowercase__ = tf.gather_nd(tf.squeeze(lowerCamelCase, 0 ), target_ids.reshape(-1, 1 ) )
lowercase__ = tf.expand_dims(lowerCamelCase, 0 )
lowercase__ = tf.math.top_k(lowerCamelCase, k=lowerCamelCase )
lowercase__ , lowercase__ = topk.values.numpy(), topk.indices.numpy()
else:
lowercase__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowercase__ = outputs[0, masked_index, :]
lowercase__ = logits.softmax(dim=-1 )
if target_ids is not None:
lowercase__ = probs[..., target_ids]
lowercase__ , lowercase__ = probs.topk(lowerCamelCase )
lowercase__ = []
lowercase__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist() ) ):
lowercase__ = []
for v, p in zip(_values, _predictions ):
# Copy is important since we're going to modify this array in place
lowercase__ = input_ids.numpy().copy()
if target_ids is not None:
lowercase__ = target_ids[p].tolist()
lowercase__ = p
# Filter padding out:
lowercase__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowercase__ = self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
lowercase__ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(lowerCamelCase )
result.append(lowerCamelCase )
if single_mask:
return result[0]
return result
def lowercase__ ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Dict=None ):
'''simple docstring'''
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [targets]
try:
lowercase__ = self.tokenizer.get_vocab()
except Exception:
lowercase__ = {}
lowercase__ = []
for target in targets:
lowercase__ = vocab.get(lowerCamelCase, lowerCamelCase )
if id_ is None:
lowercase__ = self.tokenizer(
lowerCamelCase, add_special_tokens=lowerCamelCase, return_attention_mask=lowerCamelCase, return_token_type_ids=lowerCamelCase, max_length=1, truncation=lowerCamelCase, )['''input_ids''']
if len(lowerCamelCase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowercase__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
lowercase__ = list(set(lowerCamelCase ) )
if len(lowerCamelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowercase__ = np.array(lowerCamelCase )
return target_ids
def lowercase__ ( self : List[str], lowerCamelCase : int=None, lowerCamelCase : Any=None ):
'''simple docstring'''
lowercase__ = {}
if targets is not None:
lowercase__ = self.get_target_ids(lowerCamelCase, lowerCamelCase )
lowercase__ = target_ids
if top_k is not None:
lowercase__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''', self.model.base_model_prefix, '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : List[Any], lowerCamelCase : Optional[Any], *lowerCamelCase : Optional[Any], **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = super().__call__(lowerCamelCase, **lowerCamelCase )
if isinstance(lowerCamelCase, lowerCamelCase ) and len(lowerCamelCase ) == 1:
return outputs[0]
return outputs
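A usage sketch for the pipeline above; the checkpoint name is an assumption, and any fill-mask model works:
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    print(pred["token_str"], round(pred["score"], 4))  # keys match the postprocess dict above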
| 207 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int =CanineTokenizer
lowerCamelCase : List[str] =False
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : str = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase : Union[str, Any] ) -> CanineTokenizer:
"""simple docstring"""
__lowerCAmelCase : str = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
__lowerCAmelCase : List[Any] = 10_24
return tokenizer
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = self.canine_tokenizer
__lowerCAmelCase : Any = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__lowerCAmelCase : Optional[Any] = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__lowerCAmelCase : str = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.canine_tokenizer
__lowerCAmelCase : List[str] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__lowerCAmelCase : Dict = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase )
self.assertIn("""attention_mask""" , lowerCAmelCase )
self.assertIn("""token_type_ids""" , lowerCAmelCase )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = self.canine_tokenizer
__lowerCAmelCase : Optional[int] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__lowerCAmelCase : Dict = tokenizer(
text_target=lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase : List[Any] = tempfile.mkdtemp()
__lowerCAmelCase : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
__lowerCAmelCase : Dict = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : str = tokenizer.__class__.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : int = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
__lowerCAmelCase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase : List[Any] = tempfile.mkdtemp()
__lowerCAmelCase : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
__lowerCAmelCase : Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowerCAmelCase : List[str] = chr(0Xe007 )
additional_special_tokens.append(lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__lowerCAmelCase : Optional[int] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Any = tokenizer.__class__.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertIn(lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCAmelCase : List[str] = tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase ,__lowerCAmelCase : Tuple = self.get_clean_sequence(lowerCAmelCase )
# a special token for Canine can be defined as follows:
__lowerCAmelCase : Union[str, Any] = 0Xe005
__lowerCAmelCase : str = chr(lowerCAmelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__lowerCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
__lowerCAmelCase : Optional[int] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Any = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , input_encoded + special_token_id )
__lowerCAmelCase : Tuple = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : Optional[Any] = chr(0Xe005 )
__lowerCAmelCase : Tuple = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase )
self.assertEqual(token_a[0] , lowerCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__lowerCAmelCase : str = 0Xe006
__lowerCAmelCase : Union[str, Any] = chr(lowerCAmelCase )
__lowerCAmelCase : Tuple = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase )
tokenizer.from_pretrained(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCAmelCase : List[Any] = json.load(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__lowerCAmelCase : Any = json.load(lowerCAmelCase )
# a special token for Canine can be defined as follows:
__lowerCAmelCase : List[Any] = 0Xe006
__lowerCAmelCase : Union[str, Any] = chr(lowerCAmelCase )
__lowerCAmelCase : str = [new_token_a]
__lowerCAmelCase : Optional[int] = [new_token_a]
with open(os.path.join(lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCAmelCase : List[Any] = tokenizer_class.from_pretrained(lowerCAmelCase , extra_ids=0 )
self.assertIn(lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__lowerCAmelCase : List[str] = 0Xe007
__lowerCAmelCase : Optional[Any] = chr(lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCAmelCase : Union[str, Any] = [AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase )]
__lowerCAmelCase : str = tokenizer_class.from_pretrained(
lowerCAmelCase , additional_special_tokens=lowerCAmelCase , extra_ids=0 )
self.assertIn(lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : List[str] = """hello world"""
if self.space_between_special_tokens:
__lowerCAmelCase : Optional[Any] = """[CLS] hello world [SEP]"""
else:
__lowerCAmelCase : Union[str, Any] = input
__lowerCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.decode(lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase , [output, output.lower()] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : int = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__lowerCAmelCase : Optional[Any] = """a"""
__lowerCAmelCase : List[Any] = ord(lowerCAmelCase )
for attr in attributes_list:
setattr(lowerCAmelCase , attr + """_id""" , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , attr + """_id""" ) , lowerCAmelCase )
setattr(lowerCAmelCase , attr + """_id""" , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , attr + """_id""" ) , lowerCAmelCase )
setattr(lowerCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens_ids""" ) , [] )
__lowerCAmelCase : Union[str, Any] = 0Xe006
__lowerCAmelCase : List[str] = chr(lowerCAmelCase )
setattr(lowerCAmelCase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
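For reference, a minimal sketch of driving the tokenizer under test outside the harness, using the checkpoint from the cached property above:
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
batch = tokenizer(["Life is like a box of chocolates."], padding=True, return_tensors="pt")
print(batch.input_ids.shape)  # codepoint-level ids wrapped in CLS/SEP codepoints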
| 139 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__UpperCAmelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check that the field "partitions" is included; it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : str =field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCAmelCase , )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase : Optional[Any] = torch.device("""cpu""" )
__lowerCAmelCase : Union[str, Any] = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase : int = smp.local_rank()
__lowerCAmelCase : str = torch.device("""cuda""" , lowerCAmelCase )
__lowerCAmelCase : Tuple = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase : List[str] = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase : Tuple = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase : Optional[int] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase : List[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase : List[str] = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase : int = 1
if device.type == "cuda":
torch.cuda.set_device(lowerCAmelCase )
return device
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
"""simple docstring"""
return False
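A quick standalone check of the detection helper above, using hypothetical environment values; with these set, the result is True only when `smdistributed` is also importable:
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
print(is_sagemaker_model_parallel_available())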
| 139 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Any = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
UpperCAmelCase__ : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCamelCase__ ( a ) -> int:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_A: Tuple = model_type_to_module_name(a )
_A: Tuple = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a , a )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a , '''__name__''' , a ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
_A: Any = importlib.import_module('''transformers''' )
if hasattr(a , a ):
return getattr(a , a )
return None
def lowerCamelCase__ ( a , a = None , a = False , a = False , a = None , a = None , a = None , a = False , **a , ) -> Tuple:
_A: Optional[int] = get_file_from_repo(
a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(a , encoding='''utf-8''' ) as reader:
return json.load(a )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any ):
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase_ )
def __magic_name__ ( cls : Dict , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
_A: Optional[int] = kwargs.pop('''config''' , lowerCAmelCase_ )
_A: str = kwargs.pop('''trust_remote_code''' , lowerCAmelCase_ )
_A: List[str] = True
_A , _A: Dict = ImageProcessingMixin.get_image_processor_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
_A: int = config_dict.get('''image_processor_type''' , lowerCAmelCase_ )
_A: Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_A: Any = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_A: Tuple = config_dict.pop('''feature_extractor_type''' , lowerCAmelCase_ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_A: List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_A: Union[str, Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_A: List[str] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# It could be in `config.image_processor_type``
_A: Optional[Any] = getattr(lowerCAmelCase_ , '''image_processor_type''' , lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_A: str = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_A: str = image_processor_class_from_name(lowerCAmelCase_ )
_A: List[Any] = image_processor_auto_map is not None
_A: Optional[Any] = image_processor_class is not None or type(lowerCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING
_A: Any = resolve_trust_remote_code(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if has_remote_code and trust_remote_code:
_A: Optional[Any] = get_class_from_dynamic_module(
lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
_A: List[Any] = kwargs.pop('''code_revision''' , lowerCAmelCase_ )
if os.path.isdir(lowerCAmelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING:
_A: Any = IMAGE_PROCESSOR_MAPPING[type(lowerCAmelCase_ )]
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(lowerCAmelCase_ , lowerCAmelCase_ )
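A usage sketch for the auto class above; per the mapping, a ResNet checkpoint resolves to ConvNextImageProcessor (the checkpoint name is illustrative):
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
print(type(processor).__name__)  # ConvNextImageProcessor, via the ('resnet', ...) entry above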
| 121 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = '''swin2sr'''
__UpperCamelCase : str = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[str] , lowerCAmelCase_ : int=6_4 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Dict=1_8_0 , lowerCAmelCase_ : Union[str, Any]=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_ : Tuple=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : Any=2.0 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Optional[Any]=1e-5 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : int=1.0 , lowerCAmelCase_ : Any="1conv" , lowerCAmelCase_ : List[str]="pixelshuffle" , **lowerCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: List[str] = image_size
_A: Any = patch_size
_A: Any = num_channels
_A: Union[str, Any] = embed_dim
_A: int = depths
_A: List[Any] = len(lowerCAmelCase_ )
_A: int = num_heads
_A: Any = window_size
_A: Optional[int] = mlp_ratio
_A: int = qkv_bias
_A: List[Any] = hidden_dropout_prob
_A: List[str] = attention_probs_dropout_prob
_A: List[Any] = drop_path_rate
_A: Any = hidden_act
_A: List[str] = use_absolute_embeddings
_A: Tuple = layer_norm_eps
_A: str = initializer_range
_A: int = upscale
_A: int = img_range
_A: Optional[Any] = resi_connection
_A: int = upsampler
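An instantiation sketch for the config above, using the upstream class name it mirrors; the x4 values are illustrative:
from transformers import Swin2SRConfig

config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
print(config.model_type, config.upscale)  # swin2sr 4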
| 121 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
snake_case : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', '''beit.embeddings.cls_token'''),
(F'{prefix}patch_embed.proj.weight', '''beit.embeddings.patch_embeddings.projection.weight'''),
(F'{prefix}patch_embed.proj.bias', '''beit.embeddings.patch_embeddings.projection.bias'''),
(F'{prefix}pos_embed', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ) -> List[str]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
snake_case : Optional[int] = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
snake_case : List[Any] = state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' )
snake_case : Union[str, Any] = state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' )
snake_case : Dict = state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' )
snake_case : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case : Union[str, Any] = q_bias
snake_case : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case : Union[str, Any] = state_dict.pop(F'{prefix}blocks.{i}.gamma_1' )
snake_case : int = state_dict.pop(F'{prefix}blocks.{i}.gamma_2' )
snake_case : List[Any] = gamma_a
snake_case : Tuple = gamma_a
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
'''simple docstring'''
snake_case : List[str] = dct.pop(UpperCamelCase__ )
snake_case : List[Any] = val
def _UpperCamelCase ( ) -> str:
'''simple docstring'''
snake_case : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case : int = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = False if '''rvlcdip''' in checkpoint_url else True
snake_case : List[str] = BeitConfig(use_absolute_position_embeddings=UpperCamelCase__ , use_mask_token=UpperCamelCase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case : Optional[Any] = 1024
snake_case : Dict = 4096
snake_case : int = 24
snake_case : List[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
snake_case : Optional[int] = 16
snake_case : Any = '''huggingface/label-files'''
snake_case : str = '''rvlcdip-id2label.json'''
snake_case : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case : str = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
snake_case : int = idalabel
snake_case : Dict = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case : List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )['''model''']
snake_case : Any = create_rename_keys(UpperCamelCase__ , has_lm_head=UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , has_lm_head=UpperCamelCase__ )
# load HuggingFace model
snake_case : Union[str, Any] = BeitForMaskedImageModeling(UpperCamelCase__ ) if has_lm_head else BeitForImageClassification(UpperCamelCase__ )
model.eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
snake_case : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ )
snake_case : Tuple = prepare_img()
snake_case : Dict = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' )
snake_case : str = encoding['''pixel_values''']
snake_case : str = model(UpperCamelCase__ )
snake_case : Any = outputs.logits
# verify logits
    expected_shape = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
if has_lm_head:
snake_case : List[str] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
snake_case : Optional[int] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase__ , )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
lowercase__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
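An example invocation, assuming this script is saved as convert_dit_checkpoint.py; the URL is simply the parser default above:
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base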
| 352 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array([0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array([0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array([0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array([0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 83 | 0 |
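For context, a minimal sketch of the pipeline under test outside the suite; the checkpoint id comes from the slow tests above, while the prompt and step count are arbitrary:

from diffusers import StableDiffusionLDM3DPipeline

pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50, output_type="numpy")
rgb, depth = output.rgb, output.depth  # an RGB image plus an aligned depth map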
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__(self ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : int = nn.Linear(3 , 4 )
UpperCAmelCase__ : List[Any] = nn.BatchNormad(4 )
UpperCAmelCase__ : Tuple = nn.Linear(4 , 5 )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(_lowerCamelCase ) ) )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowerCamelCase , model.state_dict() )
UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , """index.json""" )
self.assertTrue(os.path.isfile(_lowerCamelCase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCAmelCase__ : Tuple = os.path.join(_lowerCamelCase , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(_lowerCamelCase ) )
# TODO: add tests on the fact weights are properly loaded
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCAmelCase__ : Any = torch.randn(2 , 3 , dtype=_lowerCamelCase )
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = offload_weight(_lowerCamelCase , """weight""" , _lowerCamelCase , {} )
UpperCAmelCase__ : str = os.path.join(_lowerCamelCase , """weight.dat""" )
self.assertTrue(os.path.isfile(_lowerCamelCase ) )
self.assertDictEqual(_lowerCamelCase , {"""weight""": {"""shape""": [2, 3], """dtype""": str(_lowerCamelCase ).split(""".""" )[1]}} )
UpperCAmelCase__ : List[str] = load_offloaded_weight(_lowerCamelCase , index["""weight"""] )
self.assertTrue(torch.equal(_lowerCamelCase , _lowerCamelCase ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = ModelForTest()
UpperCAmelCase__ : Union[str, Any] = model.state_dict()
UpperCAmelCase__ : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" not in k}
UpperCAmelCase__ : Tuple = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) )
UpperCAmelCase__ : Any = {k: v for k, v in state_dict.items() if """weight""" in k}
UpperCAmelCase__ : Tuple = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_lowerCamelCase , _lowerCamelCase )
# Duplicates are removed
UpperCAmelCase__ : Dict = OffloadedWeightsLoader(state_dict=_lowerCamelCase , save_folder=_lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(_lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_lowerCamelCase , weight_map[key] ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
UpperCAmelCase__ : Dict = extract_submodules_state_dict(_lowerCamelCase , ["""a.1""", """a.2"""] )
self.assertDictEqual(_lowerCamelCase , {"""a.1""": 0, """a.2""": 2} )
UpperCAmelCase__ : Tuple = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
UpperCAmelCase__ : Optional[int] = extract_submodules_state_dict(_lowerCamelCase , ["""a.1""", """a.2"""] )
self.assertDictEqual(_lowerCamelCase , {"""a.1.a""": 0, """a.2.a""": 2} )
| 171 |
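A minimal sketch of the offload round trip these tests exercise, outside unittest (the tensor name is arbitrary):

import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"w": torch.randn(2, 3)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)  # writes index.json plus one .dat file per tensor
    loader = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
    assert torch.equal(loader["w"], state_dict["w"])  # read back from disk on demand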
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = BioGptTokenizer
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCAmelCase__ : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """lower newer"""
UpperCAmelCase__ : int = """lower newer"""
return input_text, output_text
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[str] = """lower"""
UpperCAmelCase__ : Optional[Any] = ["""low""", """er</w>"""]
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = tokens + ["""<unk>"""]
UpperCAmelCase__ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 171 | 1 |
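The toy vocabulary above follows the usual BPE scheme: merges apply greedily and </w> marks a word boundary. A hand-rolled sketch of the merge loop (not the BioGPT implementation):

def apply_merge(symbols, pair, merged):
    # Replace each adjacent occurrence of `pair` with the merged symbol.
    out, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == pair:
            out.append(merged)
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out

word = ["l", "o", "w", "e", "r</w>"]
word = apply_merge(word, ("l", "o"), "lo")
word = apply_merge(word, ("lo", "w"), "low")
word = apply_merge(word, ("e", "r</w>"), "er</w>")
print(word)  # ['low', 'er</w>'], matching the expected tokens in the test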
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
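The module above is the standard transformers lazy-import shim: nothing listed in _import_structure is imported until first attribute access. Illustrative use:

from transformers.models import funnel

config_cls = funnel.FunnelConfig  # triggers the lazy import of configuration_funnel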
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the integer right
    triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Project Euler 39: the perimeter <= max_perimeter with the most solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
| 325 | 0 |
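A quick sanity check against the figures quoted in the Project Euler 39 statement, where perimeter 120 is given as having exactly three solutions:

triplets = pythagorean_triple(120)
print(triplets[120])  # 3: the triangles (20, 48, 52), (24, 45, 51) and (30, 40, 50)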
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    # unittest only collects methods whose names start with "test_"
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 329 |
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 329 | 1 |
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to max_images full-resolution results of a Google Images
    search for `query` into a `query_<term>` folder and return the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 115 |
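Example invocation of the scraper above (query and count arbitrary; the regexes depend on Google's current markup, so treat the call as best-effort):

count = download_images_from_google_query("red panda", max_images=3)
print(f"{count} images saved under ./query_red_panda/")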
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 139 |
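A sketch of how the template aligns a dataset's features; the feature names and labels here are invented for the example:

from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
task = AudioClassification(audio_column="audio", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema)  # label_schema now carries the concrete two-class ClassLabel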
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
if hasattr(scheduler.config ,"steps_offset" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE:Union[str, Any] = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" ,"1.0.0" ,SCREAMING_SNAKE_CASE__ ,standard_warn=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = dict(scheduler.config )
SCREAMING_SNAKE_CASE:Union[str, Any] = 1
SCREAMING_SNAKE_CASE:Dict = FrozenDict(SCREAMING_SNAKE_CASE__ )
if hasattr(scheduler.config ,"skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE:List[Any] = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" ,"1.0.0" ,SCREAMING_SNAKE_CASE__ ,standard_warn=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = dict(scheduler.config )
SCREAMING_SNAKE_CASE:int = True
SCREAMING_SNAKE_CASE:Optional[int] = FrozenDict(SCREAMING_SNAKE_CASE__ )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Segment the region described by `text` with CLIPSeg; the sigmoid of the logits becomes the inpainting mask
        inputs = self.segmentation_processor(text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor)
        return inpainting_pipeline(prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps)
| 139 | 1 |
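Rough usage of the segmentation-guided inpainting pipeline above, in the style of diffusers community pipelines; the model ids and the custom_pipeline name are assumptions, not taken from this sample:

from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
).to("cuda")
result = pipe(image=init_image, text="a glass", prompt="a cup of coffee")  # init_image: a PIL.Image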
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 362 |
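Instantiating the backbone config above with its defaults; the derived fields follow directly from the constructor arithmetic:

config = MaskFormerSwinConfig()
print(config.hidden_size)  # 96 * 2 ** (4 - 1) = 768
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']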
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 223 | 0 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :str = TaConfig.from_json_file(_lowercase )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case_ :Optional[Any] = TaForConditionalGeneration(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowercase, _lowercase, _lowercase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 66 |
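The converter above is also importable; calling it directly is equivalent to the CLI (all three paths below are placeholders):

# Hypothetical paths; the function is the one defined above.
convert_tf_checkpoint_to_pytorch("/path/to/model.ckpt", "/path/to/config.json", "./t5-pytorch")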
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 83 | 0 |
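The tests above pivot on the TRANSFORMERS_OFFLINE environment variable, which must be set before transformers is imported. A minimal sketch of cache-only loading (model id reused from the tests):

import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"  # must be set before transformers is imported

from transformers import BertModel

model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")  # resolved from the local cache only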
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt ciphertext with a repeating key; return None as soon as a
    byte falls outside the printable character set."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep fully printable decryptions."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidates containing a given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: sum of the ASCII values of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
| 362 |
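The key search above leans on XOR being its own inverse; a two-line round trip shows the property being exploited:

key, plain = ord("a"), ord("H")
cipher = plain ^ key
assert cipher ^ key == plain  # applying the same key byte twice restores the plaintext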
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 213 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __a(SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 7
if "tiny" in model_name:
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 6, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif "small" in model_name:
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif "base" in model_name:
_lowerCAmelCase = 128
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (4, 8, 16, 32)
_lowerCAmelCase = 12
_lowerCAmelCase = 512
elif "large" in model_name:
_lowerCAmelCase = 192
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (6, 12, 24, 48)
_lowerCAmelCase = 12
_lowerCAmelCase = 768
# set label information
_lowerCAmelCase = 150
_lowerCAmelCase = "huggingface/label-files"
_lowerCAmelCase = "ade20k-id2label.json"
_lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = SwinConfig(
embed_dim=SCREAMING_SNAKE_CASE_ , depths=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , window_size=SCREAMING_SNAKE_CASE_ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
_lowerCAmelCase = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE_ , auxiliary_in_channels=SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , )
return config
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = val
def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
_lowerCAmelCase = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[:dim, :]
_lowerCAmelCase = in_proj_bias[: dim]
_lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
_lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
_lowerCAmelCase = in_proj_weight[
-dim :, :
]
_lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = x.shape
_lowerCAmelCase = x.reshape(SCREAMING_SNAKE_CASE_ , 4 , in_channel // 4 )
_lowerCAmelCase = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return x
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = x.shape
_lowerCAmelCase = x.reshape(SCREAMING_SNAKE_CASE_ , in_channel // 4 , 4 )
_lowerCAmelCase = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return x
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase = x.shape[0]
_lowerCAmelCase = x.reshape(4 , in_channel // 4 )
_lowerCAmelCase = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ )
return x
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = x.shape[0]
_lowerCAmelCase = x.reshape(in_channel // 4 , 4 )
_lowerCAmelCase = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ )
return x
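# The four reshape helpers above appear to undo (and re-apply) the [0, 2, 1, 3]
# channel interleaving introduced when Swin's patch-merging step concatenates
# 2x2 spatial neighborhoods, so downsample weights line up with the
# Transformers layer ordering; this reading is inferred from the permutation
# itself rather than stated anywhere in the checkpoint.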
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" , file_name=SCREAMING_SNAKE_CASE_ )[
"state_dict"
]
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE_ , param.shape )
_lowerCAmelCase = get_upernet_config(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "bn" in key:
_lowerCAmelCase = key.replace("bn" , "batch_norm" )
_lowerCAmelCase = val
# rename keys
_lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowerCAmelCase = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE_ )
if "norm" in key:
_lowerCAmelCase = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify on image
_lowerCAmelCase = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
_lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("RGB" )
_lowerCAmelCase = SegformerImageProcessor()
_lowerCAmelCase = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
with torch.no_grad():
_lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowerCAmelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
_lowerCAmelCase = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
_lowerCAmelCase = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
_lowerCAmelCase = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 158 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
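# Worked example (hypothetical input): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# the cheapest top-left-to-bottom-right path moving only right/down costs
# 1 + 3 + 1 + 1 + 1 = 7. Note the prefix sums are written back into `matrix`,
# so the input is mutated in place.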
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325 | 0 |
'''simple docstring'''
from math import pi, sqrt
def lowercase_ ( lowerCAmelCase__ : float ):
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(lowerCAmelCase__ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
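# Worked example of the recurrence gamma(num) = (num - 1) * gamma(num - 1):
# gamma(5) = 4 * 3 * 2 * 1 = 24, and gamma(1.5) = 0.5 * gamma(0.5) = 0.5 * sqrt(pi).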
def lowercase_ ( ):
"""simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCamelCase = 1.0
while num:
_UpperCamelCase = float(input('''Gamma of: '''))
print(F'gamma({num}) = {gamma(num)}')
print('''\nEnter 0 to exit...''')
| 352 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _A :
def __init__( self , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : str = np.random.default_rng(__UpperCAmelCase )
__UpperCAmelCase : List[str] = length
__UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa )
__UpperCAmelCase : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Dict:
'''simple docstring'''
return self.length
def __getitem__( self , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class _A ( torch.nn.Module ):
def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
super().__init__()
__UpperCAmelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__UpperCAmelCase : Any = True
def __A ( self , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
__UpperCAmelCase : Optional[int] = False
return x * self.a[0] + self.b[0]
class _A ( torch.nn.Module ):
def __init__( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Tuple = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() )
__UpperCAmelCase : List[str] = torch.nn.Parameter(torch.tensor(__UpperCAmelCase ).float() )
__UpperCAmelCase : str = True
def __A ( self , __UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
__UpperCAmelCase : int = False
return x * self.a + self.b
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
__UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCAmelCase : List[str] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
__UpperCAmelCase : Tuple = load_dataset("""csv""" , data_files=lowerCAmelCase__ )
__UpperCAmelCase : Optional[Any] = datasets["""train"""].unique("""label""" )
__UpperCAmelCase : str = {v: i for i, v in enumerate(lowerCAmelCase__ )}
def tokenize_function(lowerCAmelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : List[Any] = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" )
if "label" in examples:
__UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCAmelCase : Tuple = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCAmelCase__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__UpperCAmelCase : Optional[Any] = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 )
__UpperCAmelCase : List[Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 16 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = """Salesforce/blip-image-captioning-base"""
__a = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
__a = """image_captioner"""
__a = AutoModelForVisionaSeq
__a = ["""image"""]
__a = ["""text"""]
def __init__( self : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : "Image" ):
'''simple docstring'''
return self.pre_processor(images=UpperCamelCase , return_tensors="""pt""" )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return self.model.generate(**UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int ):
'''simple docstring'''
return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0].strip()
| 115 |
"""simple docstring"""
import re
def lowerCamelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
if len(re.findall("""[ATCG]""" , _UpperCamelCase ) ) != len(_UpperCamelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
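# Example: the complement of "ATCG" is "TAGC" (A<->T, C<->G); any character
# outside {A, T, C, G} raises ValueError("Invalid Strand").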
if __name__ == "__main__":
import doctest
doctest.testmod()
| 115 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Tuple = (EulerDiscreteScheduler,)
_UpperCamelCase:int = 10
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ ={
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def _snake_case ( self )-> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Any:
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ =sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ =scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =output.prev_sample
lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
def _snake_case ( self )-> str:
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ =sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ =scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =output.prev_sample
lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1E-2
assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps , device=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ =sample.to(_SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
lowerCamelCase_ =scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =output.prev_sample
lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**_SCREAMING_SNAKE_CASE , use_karras_sigmas=_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps , device=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ =sample.to(_SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
lowerCamelCase_ =scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =output.prev_sample
lowerCamelCase_ =torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1E-3
| 49 |
import unittest
from knapsack import greedy_knapsack as kp
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =[10, 20, 30, 40, 50, 60]
lowerCamelCase_ =[2, 4, 6, 8, 10, 12]
lowerCamelCase_ =100
self.assertEqual(kp.calc_profit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 210 )
def _snake_case ( self )-> Any:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """max_weight must greater than zero.""" )
def _snake_case ( self )-> Dict:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """Weight can not be negative.""" )
def _snake_case ( self )-> Dict:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """Profit can not be negative.""" )
def _snake_case ( self )-> Tuple:
self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """max_weight must greater than zero.""" )
def _snake_case ( self )-> Any:
self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 49 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
def A (__A : Union[tf.Tensor, np.ndarray] ) -> List[int]:
"""simple docstring"""
if isinstance(__A , np.ndarray ):
return list(tensor.shape )
UpperCAmelCase_ = tf.shape(__A )
if tensor.shape == tf.TensorShape(__A ):
return dynamic
UpperCAmelCase_ = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__A )]
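# Shape sketch (hypothetical tensor): for a tf.Tensor with static shape
# (None, 128) this returns [<scalar batch-size tensor>, 128], letting callers
# mix static and dynamic dimensions in downstream reshapes.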
def A (__A : tf.Tensor , __A : Optional[int] = None , __A : Optional[str] = None ) -> tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=__A , name=__A )
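# The 1e-9 added to the logits is numerically negligible; in the upstream
# Transformers utility this wrapper is described as a workaround for a
# TensorFlow/XLA CPU softmax bug, which is the reading assumed here.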
def A (__A : Union[str, Any] , __A : Optional[int] , __A : str , __A : Optional[int]=1E-5 , __A : int=-1 ) -> Any:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__A , __A ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
UpperCAmelCase_ , UpperCAmelCase_ = tf.nn.moments(__A , axes=[axis] , keepdims=__A )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
UpperCAmelCase_ = [1] * inputs.shape.rank
UpperCAmelCase_ = shape_list(__A )[axis]
UpperCAmelCase_ = tf.reshape(__A , __A )
UpperCAmelCase_ = tf.reshape(__A , __A )
# Compute layer normalization using the batch_normalization
# function.
UpperCAmelCase_ = tf.nn.batch_normalization(
__A , __A , __A , offset=__A , scale=__A , variance_epsilon=__A , )
return outputs
def A (__A : Any , __A : List[Any]=0 , __A : Tuple=-1 ) -> Optional[Any]:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCAmelCase_ = tf.shape(__A )
UpperCAmelCase_ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCAmelCase_ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__A , __A )
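# Shape sketch (hypothetical arguments): flattening a (2, 3, 4) tensor with
# start_dim=1 yields shape (2, 12), mirroring torch.flatten semantics for the
# inclusive [start_dim, end_dim] range.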
def A (__A : tf.Tensor ) -> tf.Tensor:
"""simple docstring"""
if not isinstance(__A , tf.Tensor ):
UpperCAmelCase_ = tf.convert_to_tensor(__A ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCAmelCase_ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCAmelCase_ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCAmelCase_ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A (__A : tf.Tensor , __A : int , __A : str = "input_ids" ) -> None:
"""simple docstring"""
tf.debugging.assert_less(
__A , tf.cast(__A , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(__A )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def A (__A : Tuple , __A : Dict , __A : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase_ = [x for x in data if len(__A ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
UpperCAmelCase_ = np.asarray(__A )
UpperCAmelCase_ = 1
UpperCAmelCase_ = np.array_split(__A , __A )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase_ = np.array_split(__A , __A )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__A ):
UpperCAmelCase_ = chunk_data
else:
UpperCAmelCase_ = data
def A (__A : List[Any] , __A : Any ) -> int:
"""simple docstring"""
if name in group.attrs:
UpperCAmelCase_ = [n.decode('''utf8''' ) if hasattr(__A , '''decode''' ) else n for n in group.attrs[name]]
else:
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(__A , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def A (__A : List[Any] ) -> Any:
"""simple docstring"""
def _expand_single_ad_tensor(__A : Optional[int] ):
if isinstance(__A , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__A , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __A )
| 51 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" ,set() )
@pytest.fixture
def UpperCAmelCase_ ( __lowerCamelCase : Any ):
class a_ :
def __init__( self : int , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = metric_id
class a_ :
__A = [MetricMock(_lowerCAmelCase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" ,HfhMock() )
@pytest.mark.parametrize(
"func, args" ,[(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : int ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Tuple ):
if "tmp_path" in args:
lowercase_ :Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(__lowerCamelCase ,match="https://huggingface.co/docs/evaluate" ):
func(*__lowerCamelCase )
| 223 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_lowerCamelCase = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCamelCase = datasets.utils.logging.get_logger(__name__)
class _snake_case (folder_based_builder.FolderBasedBuilderConfig):
__A : bool =None
__A : bool =None
class _snake_case (folder_based_builder.FolderBasedBuilder):
__A : Union[str, Any] =datasets.Audio()
__A : Optional[int] ="audio"
__A : Any =AudioFolderConfig
__A : List[str] # definition at the bottom of the script
__A : Optional[int] =AudioClassification(audio_column="audio" , label_column="label")
_lowerCamelCase = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
_lowerCamelCase = AUDIO_EXTENSIONS
| 67 | 0 |
"""simple docstring"""
import math
import random
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : bool = False ) -> List[str]:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
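# Note on the deriv branch above: it expects `value` to already be sigmoid(x),
# since d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); the error-delta step
# in the training loop below is read as calling it on the activated layer output.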
# Initial Value
lowercase_ = 0.02
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> Optional[Any]:
__a = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__SCREAMING_SNAKE_CASE ):
# Forward propagation
__a = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__a = (expected / 100) - layer_a
# Error delta
__a = layer_1_error * sigmoid_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
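# Usage sketch (hypothetical arguments): forward_propagation(32, 450_000)
# trains the single weight toward sigmoid(weight * 0.02) == 0.32 and typically
# returns a value near 32, though the random initial weight makes runs vary.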
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input("Expected value: "))
lowercase_ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 45 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowercase__( ):
lowercase_ : Any = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(__SCREAMING_SNAKE_CASE )
# Parse args
lowercase_ , lowercase_ : Dict = parser.parse_known_args()
if not hasattr(__SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
lowercase_ : int = parse_unknown_args(__SCREAMING_SNAKE_CASE )
# Run
lowercase_ : List[Any] = args.func(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 213 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """mobilenet_v2"""
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Tuple=224 , UpperCamelCase__ : List[Any]=1.0 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : str=6 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Union[str, Any]="relu6" , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=0.8 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Tuple=0.001 , UpperCamelCase__ : int=255 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = depth_multiplier
SCREAMING_SNAKE_CASE : List[Any] = depth_divisible_by
SCREAMING_SNAKE_CASE : Dict = min_depth
SCREAMING_SNAKE_CASE : List[str] = expand_ratio
SCREAMING_SNAKE_CASE : List[str] = output_stride
SCREAMING_SNAKE_CASE : Tuple = first_layer_is_expansion
SCREAMING_SNAKE_CASE : List[Any] = finegrained_output
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = tf_padding
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : str = semantic_loss_ignore_index
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.11""")
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __A ( self : Tuple ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __A ( self : Any ):
'''simple docstring'''
return 1E-4
| 258 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = 42
UpperCamelCase_ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 258 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE (A_ ):
lowerCAmelCase = "speech_to_text_2"
lowerCAmelCase = ["past_key_values"]
lowerCAmelCase = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _UpperCAmelCase=1_0000 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=1024 , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Dict = vocab_size
__A : List[Any] = d_model
__A : int = decoder_ffn_dim
__A : int = decoder_layers
__A : str = decoder_attention_heads
__A : Tuple = dropout
__A : str = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : int = activation_function
__A : int = init_std
__A : Tuple = decoder_layerdrop
__A : Dict = use_cache
__A : str = decoder_layers
__A : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__A : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , **_snake_case , )
| 190 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case : Union[str, Any] = logging.getLogger(__name__)
snake_case : Optional[int] = '''Hello world! cécé herlolip'''
snake_case : List[str] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict ):
"""simple docstring"""
a :Optional[int] = BertAbsConfig(
temp_dir='''.''' , finetune_bert=UpperCAmelCase_ , large=UpperCAmelCase_ , share_emb=UpperCAmelCase_ , use_bert_emb=UpperCAmelCase_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
a :Optional[int] = torch.load(UpperCAmelCase_ , lambda UpperCAmelCase_ , UpperCAmelCase_ : storage )
a :Tuple = AbsSummarizer(UpperCAmelCase_ , torch.device('''cpu''' ) , UpperCAmelCase_ )
original.eval()
a :Optional[Any] = BertAbsSummarizer(UpperCAmelCase_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
a :Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
a :Tuple = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCAmelCase_ )) )
a :List[str] = torch.tensor(UpperCAmelCase_ ).unsqueeze(0 )
a :Any = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCAmelCase_ )) )
a :Dict = torch.tensor(UpperCAmelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
a :Optional[Any] = encoder_input_ids
a :Union[str, Any] = decoder_input_ids
a :Optional[int] = None
a :Any = None
a :List[str] = None
a :List[Any] = None
a :Union[str, Any] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
a :Any = original(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )[0]
a :List[str] = original.generator(UpperCAmelCase_ )
a :List[Any] = new_model(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )[0]
a :Dict = new_model.generator(UpperCAmelCase_ )
a :List[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(UpperCAmelCase_ ) )
a :int = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(UpperCAmelCase_ ) )
a :Dict = torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
snake_case : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 281 |
from ...configuration_utils import PretrainedConfig
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'bert-generation'
def __init__( self , _lowerCamelCase=5_0358 , _lowerCamelCase=1024 , _lowerCamelCase=24 , _lowerCamelCase=16 , _lowerCamelCase=4096 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase="absolute" , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
a :Optional[int] = vocab_size
a :Tuple = hidden_size
a :Any = num_hidden_layers
a :Any = num_attention_heads
a :List[Any] = hidden_act
a :Tuple = intermediate_size
a :Any = hidden_dropout_prob
a :int = attention_probs_dropout_prob
a :Dict = max_position_embeddings
a :int = initializer_range
a :Union[str, Any] = layer_norm_eps
a :str = position_embedding_type
a :int = use_cache
| 281 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = eval_examples
__a = post_process_function
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
__a = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
__a = gen_kwargs
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE)
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE)
return metrics
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE)
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''')
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
| 49 |
from __future__ import annotations
from typing import Any
def __snake_case ( postfix_notation ):
if not postfix_notation:
return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack = []
for token in postfix_notation:
if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
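    # Hypothetical smoke test (added, not in the original file): "2 3 + 4 *"
    # is the postfix form of (2 + 3) * 4.
    print(__snake_case(['2', '3', '+', '4', '*']))  # expected output: 20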
| 49 | 1 |
"""simple docstring"""
def search ( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
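    # Hypothetical check (added, not in the original file): the search walks in
    # from both ends, so key 5 is found at index 2.
    print(search([1, 3, 5, 7, 9], 5))  # expected output: 2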
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train , X_test , y_train , y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial ( ) -> None:
    '''simple docstring'''
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Linear Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : Any = 16
_lowerCamelCase : Optional[Any] = 32
def __lowerCamelCase ( A__ , A__ = 16 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
UpperCamelCase__ , padding='longest' , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors='pt' , )
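    # Note added for clarity (not in the original script): padding to a multiple
    # of 8 (fp16/bf16) or 16 (fp8) keeps sequence lengths aligned with tensor-core
    # tile sizes, while the fixed length of 128 on TPU avoids XLA recompilation.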
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase : Tuple = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase__ ) == "1":
UpperCamelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config['lr']
UpperCamelCase = int(config['num_epochs'] )
UpperCamelCase = int(config['seed'] )
UpperCamelCase = int(config['batch_size'] )
set_seed(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCamelCase = MAX_GPU_BATCH_SIZE
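    # Note added for clarity (not in the original script): when batch_size is a
    # multiple of MAX_GPU_BATCH_SIZE, these two lines keep the effective batch
    # size unchanged -- each optimizer step now accumulates
    # `gradient_accumulation_steps` micro-batches of size MAX_GPU_BATCH_SIZE.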
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCamelCase = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCamelCase = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase = model(**UpperCamelCase__ )
UpperCamelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(UpperCamelCase__ ),
'epoch': epoch,
} , step=UpperCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=UpperCamelCase__ , default='logs' , help='Location on where to store experiment tracking logs and relevant project information' , )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 28 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
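# Note added for clarity (not in the original file): list_field wraps
# dataclasses.field with a default_factory so each dataclass instance gets its
# own copy of the mutable list default instead of sharing a single object.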
@dataclass
class a__ :
lowerCamelCase : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 67 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Dict = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=__a , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=__a , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=__a )
return parser.parse_args()
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Dict = parse_args()
# Import training_script as a module.
snake_case_ : Any = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ : str = script_fpath.stem
snake_case_ : Optional[Any] = importlib.import_module(__a )
# Patch sys.argv
snake_case_ : Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 88 |
from __future__ import annotations
import math
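# Note added for clarity (not in the original file): ucal(u, p) below computes
# the falling product u * (u - 1) * ... * (u - p + 1), the coefficient of the
# i-th forward difference in Newton's forward-difference interpolation formula
# f(x) ~ sum_i ucal(u, i) * d^i y0 / i!.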
def ucal ( u , p ):
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main ( ):
    n = int(input('enter the numbers of values: ' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 88 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ):
A = params
A = np.array(_lowerCAmelCase )
A = np.array([len(_lowerCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__(self : List[str] , _lowerCAmelCase : int ):
return (self.token_ids[index], self.lengths[index])
def __len__(self : str ):
return len(self.lengths )
def A (self : Tuple ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A (self : List[Any] ):
A = self.params.max_model_input_size
A = self.lengths > max_len
logger.info(F"""Splitting {sum(_lowerCAmelCase )} too long sequences.""" )
def divide_chunks(_lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
return [l[i : i + n] for i in range(0 , len(_lowerCAmelCase ) , _lowerCAmelCase )]
A = []
A = []
if self.params.mlm:
A , A = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
A , A = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
A = np.insert(_lowerCAmelCase , 0 , _lowerCAmelCase )
if sub_s[-1] != sep_id:
A = np.insert(_lowerCAmelCase , len(_lowerCAmelCase ) , _lowerCAmelCase )
assert len(_lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_lowerCAmelCase )
new_tok_ids.extend(_lowerCAmelCase )
new_lengths.extend([len(_lowerCAmelCase ) for l in sub_seqs] )
A = np.array(_lowerCAmelCase )
A = np.array(_lowerCAmelCase )
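        # Note added for clarity (not in the original file): each chunk produced
        # above is re-framed with the cls/bos id at position 0 and the sep/eos id
        # at the end, so every sub-sequence stays well-formed for collation.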
def A (self : str ):
A = len(self )
A = self.lengths > 11
A = self.token_ids[indices]
A = self.lengths[indices]
A = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def A (self : int ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
A = self.params.special_tok_ids["""unk_token"""]
A = len(self )
A = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A = (unk_occs / self.lengths) < 0.5
A = self.token_ids[indices]
A = self.lengths[indices]
A = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def A (self : Dict ):
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A (self : Dict , _lowerCAmelCase : List[str] ):
A = [t[0] for t in batch]
A = [t[1] for t in batch]
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
# Max for paddings
A = max(_lowerCAmelCase )
# Pad token ids
if self.params.mlm:
A = self.params.special_tok_ids["""pad_token"""]
else:
A = self.params.special_tok_ids["""unk_token"""]
A = [list(t.astype(_lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_lowerCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(_lowerCAmelCase )
assert all(len(_lowerCAmelCase ) == max_seq_len_ for t in tk_ )
A = torch.tensor(tk_ ) # (bs, max_seq_len_)
A = torch.tensor(_lowerCAmelCase ) # (bs)
return tk_t, lg_t
| 258 |
'''simple docstring'''
def __a ( density , bulk_modulus ) ->float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
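    # Hypothetical demo (added; the values are assumptions, not from the original
    # file): water at density ~998 kg/m^3 with bulk modulus ~2.15e9 Pa.
    print(__a(998, 2.15e9))  # ~1467.8 m/s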
| 258 | 1 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url = "https://www.worldometers.info/coronavirus" )-> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 60 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __magic_name__ :
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=4 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.1 , _lowercase=True , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , )-> str:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_multiple_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = weight_tying
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self )-> int:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseModel(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase )-> List[str]:
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
UpperCamelCase_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , output_hidden_states=_lowercase )
UpperCamelCase_ = output_from_no_past["hidden_states"][0]
UpperCamelCase_ = model(
_lowercase , attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCamelCase_ :str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ :int = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCamelCase_ :int = False
UpperCamelCase_ :Dict = False
UpperCamelCase_ :List[str] = False
UpperCamelCase_ :int = False
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
# This regression test was failing with PyTorch < 1.3
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = "abeja/gpt-neox-japanese-2.7b"
UpperCamelCase_ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
UpperCamelCase_ = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
UpperCamelCase_ = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
UpperCamelCase_ = []
for prompt in prompts:
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="pt" ).input_ids
UpperCamelCase_ = model.generate(_lowercase , max_length=50 )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
| 60 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Any = {}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'llama'
UpperCamelCase__ = ['past_key_values']
def __init__( self , _a=32_000 , _a=4_096 , _a=11_008 , _a=32 , _a=32 , _a=None , _a="silu" , _a=2_048 , _a=0.02 , _a=1e-6 , _a=True , _a=0 , _a=1 , _a=2 , _a=1 , _a=False , _a=None , **_a , ):
__magic_name__ : Optional[Any] = vocab_size
__magic_name__ : List[str] = max_position_embeddings
__magic_name__ : Dict = hidden_size
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : Union[str, Any] = num_key_value_heads
__magic_name__ : str = hidden_act
__magic_name__ : List[str] = initializer_range
__magic_name__ : List[str] = rms_norm_eps
__magic_name__ : Optional[int] = pretraining_tp
__magic_name__ : Any = use_cache
__magic_name__ : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , )
def SCREAMING_SNAKE_CASE ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'''got {self.rope_scaling}''' )
__magic_name__ : Optional[Any] = self.rope_scaling.get("type" , _a )
__magic_name__ : Dict = self.rope_scaling.get("factor" , _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
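# Illustrative usage sketch (added, not in the original file; assumes this class
# is exported upstream as LlamaConfig): a linear RoPE scaling dict that passes
# _rope_scaling_validation.
# config = _snake_case(rope_scaling={"type": "linear", "factor": 2.0})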
| 281 |
def lowerCAmelCase_ ( matrix : list[list[int | float]] ) -> int:
    '''simple docstring'''
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
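    # Hypothetical check (added, not in the original file): a 2x3 matrix with
    # linearly independent rows has rank 2.
    print(lowerCAmelCase_([[1, 2, 3], [4, 5, 6]]))  # expected output: 2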
| 281 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase : Tuple = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : List[Any] ):
A = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
A = self.diffusers_dir
shutil.copy(
os.path.join(_lowerCAmelCase , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def A (self : str ):
A = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def A (self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : int=None ):
A = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
A = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
A = black.format_str(_lowerCAmelCase , mode=_lowerCAmelCase )
A = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(_lowerCAmelCase , """w""" , newline="""\n""" ) as f:
f.write(_lowerCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCAmelCase )
with open(_lowerCAmelCase , """r""" ) as f:
self.assertTrue(f.read() , _lowerCAmelCase )
def A (self : Union[str, Any] ):
A = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def A (self : Tuple ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , _lowerCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , _lowerCAmelCase ) , )
# Copy consistency with a really long name
A = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("""Bert""" , _lowerCAmelCase , _lowerCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , _lowerCAmelCase , overwrite_result=re.sub("""DDPM""" , """Test""" , _lowerCAmelCase ) , )
| 337 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''umt5'''
__lowerCAmelCase = ['''past_key_values''']
def __init__(self : Dict , _lowerCAmelCase : Optional[int]=25_0112 , _lowerCAmelCase : int=512 , _lowerCAmelCase : Any=64 , _lowerCAmelCase : int=1024 , _lowerCAmelCase : int=8 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=1e-6 , _lowerCAmelCase : Dict=1.0 , _lowerCAmelCase : Tuple="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]="T5Tokenizer" , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def A (self : Optional[Any] ):
return self.d_model
@property
def A (self : List[Any] ):
return self.num_heads
@property
def A (self : Dict ):
return self.num_layers
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A (self : Optional[Any] ):
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A (self : Union[str, Any] ):
return 13
@property
def A (self : Tuple ):
return 5e-4
| 337 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( UpperCamelCase__ , unittest.TestCase ):
A__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def A ( self : List[str] , _a : str=0 ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =np.random.RandomState(__a )
_SCREAMING_SNAKE_CASE ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def A ( self : Dict ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**__a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs['prompt']]
# forward
_SCREAMING_SNAKE_CASE =pipe(**__a )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('prompt' )]
_SCREAMING_SNAKE_CASE =pipe.tokenizer(
__a , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='np' , )
_SCREAMING_SNAKE_CASE =text_inputs['input_ids']
_SCREAMING_SNAKE_CASE =pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_SCREAMING_SNAKE_CASE =prompt_embeds
# forward
_SCREAMING_SNAKE_CASE =pipe(**__a )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def A ( self : Any ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * ['this is a negative prompt']
_SCREAMING_SNAKE_CASE =negative_prompt
_SCREAMING_SNAKE_CASE =3 * [inputs['prompt']]
# forward
_SCREAMING_SNAKE_CASE =pipe(**__a )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('prompt' )]
_SCREAMING_SNAKE_CASE =[]
for p in [prompt, negative_prompt]:
_SCREAMING_SNAKE_CASE =pipe.tokenizer(
__a , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='np' , )
_SCREAMING_SNAKE_CASE =text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =embeds
# forward
_SCREAMING_SNAKE_CASE =pipe(**__a )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def A ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A ( self : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ort.SessionOptions()
_SCREAMING_SNAKE_CASE =False
return options
def A ( self : str ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
np.random.seed(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Tuple ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE ='open neural network exchange'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE ='open neural network exchange'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
def test_callback_fn(_a : int , _a : int , _a : np.ndarray ) -> None:
_SCREAMING_SNAKE_CASE =True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
_SCREAMING_SNAKE_CASE ='Andromeda galaxy in a bottle'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
pipe(
prompt=__a , num_inference_steps=5 , guidance_scale=7.5 , generator=__a , callback=__a , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__a , __a )
assert pipe.safety_checker is None
_SCREAMING_SNAKE_CASE =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(__a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_SCREAMING_SNAKE_CASE =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 47 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] , __a : str ):
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
f.write(__a )
def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = os.path.join(__a , "output" )
UpperCAmelCase_ = os.path.join(__a , "data" )
self._create_dummy_data(data_dir=__a )
UpperCAmelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
@require_torch_gpu
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _lowercase (self : Dict ):
UpperCAmelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase (self : Any ):
        UpperCAmelCase_ = self._run_finetune(gpus=2 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
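# Note (added for clarity; not in the original file): each test above shells out to
# finetune_rag.py via `execute_subprocess_async`, so running them end-to-end requires at
# least one GPU, two GPUs for the `require_torch_multi_gpu` cases, and `ray` installed
# for the ray-retriever variants.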
| 1 | 0 |
from typing import List
import numpy as np
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= {key: len(lowercase__ ) for key, value in gen_kwargs.items() if isinstance(lowercase__ , lowercase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
__lowercase= max(lists_lengths.values() , default=0 )
return max(1 , lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[range]:
'''simple docstring'''
__lowercase= []
for group_idx in range(lowercase__ ):
__lowercase= num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase= shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase= range(lowercase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowercase__ )
return shards_indices_per_group
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[dict]:
'''simple docstring'''
__lowercase= _number_of_shards_in_gen_kwargs(lowercase__ )
if num_shards == 1:
return [dict(lowercase__ )]
else:
__lowercase= _distribute_shards(num_shards=lowercase__ , max_num_jobs=lowercase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowercase__ , lowercase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowercase__ ) )
]
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowercase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _lowerCamelCase( lowercase__ , lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= {len(lowercase__ ) for value in gen_kwargs.values() if isinstance(lowercase__ , lowercase__ )}
__lowercase= {}
for size in list_sizes:
__lowercase= list(range(lowercase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase= dict(lowercase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowercase__ , lowercase__ ):
__lowercase= [value[i] for i in indices_per_size[len(lowercase__ )]]
return shuffled_kwargs
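# Illustrative worked example (added; not part of the original module). In the upstream
# `datasets` sharding utilities these helpers appear to correspond to
# `_number_of_shards_in_gen_kwargs`, `_distribute_shards`, `_split_gen_kwargs`,
# `_merge_gen_kwargs` and `_shuffle_gen_kwargs`; since they all share one obfuscated name
# here, the distribution arithmetic is shown as comments only:
#   distributing 5 shards over 2 jobs -> [range(0, 3), range(3, 5)]
#     (5 // 2 == 2 shards per job, and the remainder 5 % 2 == 1 goes to the first group)
#   distributing 2 shards over 4 jobs -> [range(0, 1), range(1, 2)]
#     (the loop breaks once num_shards_to_add is 0, so no empty group is ever emitted)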
| 368 |
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
'''simple docstring'''
__lowercase= 2**power
__lowercase= str(lowercase__ )
__lowercase= list(lowercase__ )
__lowercase= 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase = solution(power)
print('''Sum of the digits is: ''', result)
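# Quick sanity checks (added for illustration; the function above is invoked as
# `solution` at its call site even though its definition carries an obfuscated name):
#   2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26, so solution(15) == 26
#   solution(1000) == 1366, the classic Project Euler #16 answer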
| 304 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """timm_backbone"""
def __init__( self : Dict , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Any=3 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : Any , ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
__magic_name__ = backbone
__magic_name__ = num_channels
__magic_name__ = features_only
__magic_name__ = use_pretrained_backbone
__magic_name__ = True
__magic_name__ = out_indices if out_indices is not None else (-1,)
| 88 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ , __magic_name__ = emb.weight.shape
__magic_name__ = nn.Linear(A_, A_, bias=A_ )
__magic_name__ = emb.weight.data
return lin_layer
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = torch.load(A_, map_location="""cpu""" )
__magic_name__ = Namespace(**checkpoint["""cfg"""]["""model"""] )
__magic_name__ = checkpoint["""model"""]
remove_ignore_keys_(A_ )
__magic_name__ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
__magic_name__ = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
__magic_name__ = XGLMConfig(
vocab_size=A_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
__magic_name__ = XGLMForCausalLM(A_ )
__magic_name__ = model.load_state_dict(A_, strict=A_ )
print(A_ )
__magic_name__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
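# Illustrative invocation (added; the script name and paths are hypothetical, but the two
# positional arguments match the parser defined above):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted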
| 88 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = '''poolformer'''
def __init__( self :Optional[int] ,__snake_case :Tuple=3 ,__snake_case :Any=16 ,__snake_case :str=16 ,__snake_case :Dict=3 ,__snake_case :Optional[int]=4.0 ,__snake_case :int=[2, 2, 6, 2] ,__snake_case :int=[64, 1_28, 3_20, 5_12] ,__snake_case :Any=[7, 3, 3, 3] ,__snake_case :str=[4, 2, 2, 2] ,__snake_case :Optional[int]=[2, 1, 1, 1] ,__snake_case :List[Any]=4 ,__snake_case :List[str]=0.0 ,__snake_case :Optional[Any]="gelu" ,__snake_case :Tuple=True ,__snake_case :Any=1E-5 ,__snake_case :List[str]=0.02 ,**__snake_case :Any ,) -> Union[str, Any]:
a__ = num_channels
a__ = patch_size
a__ = stride
a__ = padding
a__ = pool_size
a__ = hidden_sizes
a__ = mlp_ratio
a__ = depths
a__ = patch_sizes
a__ = strides
a__ = num_encoder_blocks
a__ = drop_path_rate
a__ = hidden_act
a__ = use_layer_scale
a__ = layer_scale_init_value
a__ = initializer_range
super().__init__(**__snake_case )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = version.parse('''1.11''' )
@property
def lowerCamelCase__( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__( self :Any ) -> float:
return 2E-3
| 109 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = MvpTokenizer
UpperCAmelCase__ : List[Any] = MvpTokenizerFast
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Tuple = filter_roberta_detectors
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
super().setUp()
a__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a__ = {'unk_token': '<unk>'}
a__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
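        # Note on the fixture above (comment added; not part of the original test): the toy
        # vocab/merges implement a minimal GPT-2 style byte-level BPE where "\u0120" marks a
        # leading space, so " lower" tokenizes to ["\u0120low", "er"] via the merges
        # Ġ+l, Ġl+o, Ġlo+w and e+r.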
def lowerCamelCase__( self :Optional[Any] ,**__snake_case :Tuple ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__snake_case )
def lowerCamelCase__( self :Optional[Any] ,**__snake_case :Dict ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Union[str, Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def lowerCamelCase__( self :Optional[int] ) -> str:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,max_length=len(__snake_case ) ,padding=__snake_case ,return_tensors='pt' )
self.assertIsInstance(__snake_case ,__snake_case )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
a__ = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case ,__snake_case )
# Test that special tokens are reset
@require_torch
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,padding=__snake_case ,return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' ,__snake_case )
self.assertIn('attention_mask' ,__snake_case )
self.assertNotIn('labels' ,__snake_case )
self.assertNotIn('decoder_attention_mask' ,__snake_case )
@require_torch
def lowerCamelCase__( self :Any ) -> int:
a__ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(text_target=__snake_case ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def lowerCamelCase__( self :int ) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] ,padding=__snake_case ,truncation=__snake_case ,return_tensors='pt' )
self.assertIsInstance(__snake_case ,__snake_case )
self.assertEqual(batch.input_ids.shape ,(2, 10_24) )
@require_torch
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = ['A long paragraph for summarization.']
a__ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a__ = tokenizer(__snake_case ,text_target=__snake_case ,return_tensors='pt' )
a__ = inputs['input_ids']
a__ = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a__ = self.rust_tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
a__ = self.tokenizer_class.from_pretrained(__snake_case ,**__snake_case )
a__ = 'A, <mask> AllenNLP sentence.'
a__ = tokenizer_r.encode_plus(__snake_case ,add_special_tokens=__snake_case ,return_token_type_ids=__snake_case )
a__ = tokenizer_p.encode_plus(__snake_case ,add_special_tokens=__snake_case ,return_token_type_ids=__snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so its mean over the sequence length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
a__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
a__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__snake_case ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__snake_case ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 109 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ : Optional[int] = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
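# Note (added, assuming the standard `_LazyModule` behavior in transformers): outside of
# TYPE_CHECKING, attributes such as `BioGptModel` are resolved lazily on first access, so
# importing this package does not pull in torch until a torch-backed class is actually used.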
| 60 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
snake_case__ : List[Any] = '''bart'''
snake_case__ : Union[str, Any] = True
@st.cache(allow_output_mutation=_snake_case )
def _snake_case ( ):
if LOAD_DENSE_INDEX:
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
lowerCAmelCase : List[str] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
lowerCAmelCase : Optional[int] = qar_model.eval()
else:
lowerCAmelCase, lowerCAmelCase : int = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
lowerCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
lowerCAmelCase : Optional[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
lowerCAmelCase : Any = sas_model.eval()
else:
lowerCAmelCase, lowerCAmelCase : Any = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_snake_case )
def _snake_case ( ):
if LOAD_DENSE_INDEX:
lowerCAmelCase : List[str] = faiss.StandardGpuResources()
lowerCAmelCase : Optional[Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
lowerCAmelCase : List[Any] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
lowerCAmelCase : Union[str, Any] = faiss.IndexFlatIP(128 )
lowerCAmelCase : int = faiss.index_cpu_to_gpu(_snake_case , 1 , _snake_case )
wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU
else:
lowerCAmelCase, lowerCAmelCase : List[str] = (None, None)
lowerCAmelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_snake_case )
def _snake_case ( ):
lowerCAmelCase : List[str] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
lowerCAmelCase : Any = elia['''train_eli5''']
lowerCAmelCase : int = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
lowerCAmelCase : Tuple = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_snake_case )
return (elia_train, eli5_train_q_index)
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = load_indexes()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = load_models()
snake_case__ , snake_case__ : Union[str, Any] = load_train_data()
def _snake_case ( _snake_case : int , _snake_case : Dict=10 ):
lowerCAmelCase : Tuple = embed_questions_for_retrieval([question] , _snake_case , _snake_case )
lowerCAmelCase, lowerCAmelCase : Any = eli5_train_q_index.search(_snake_case , _snake_case )
lowerCAmelCase : str = [elia_train[int(_snake_case )] for i in I[0]]
return nn_examples
def _snake_case ( _snake_case : List[Any] , _snake_case : str="wiki40b" , _snake_case : List[str]="dense" , _snake_case : Union[str, Any]=10 ):
if source == "none":
lowerCAmelCase, lowerCAmelCase : List[str] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase, lowerCAmelCase : Tuple = query_qa_dense_index(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
lowerCAmelCase, lowerCAmelCase : List[str] = query_es_index(
_snake_case , _snake_case , index_name='''english_wiki40b_snippets_100w''' , n_results=_snake_case , )
lowerCAmelCase : int = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
lowerCAmelCase : Any = '''question: {} context: {}'''.format(_snake_case , _snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _snake_case : None),
} )
def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : Dict , _snake_case : List[Any]=64 , _snake_case : int=256 , _snake_case : List[str]=False , _snake_case : Any=2 , _snake_case : List[Any]=0.95 , _snake_case : Tuple=0.8 ):
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = qa_sas_generate(
_snake_case , _snake_case , _snake_case , num_answers=1 , num_beams=_snake_case , min_len=_snake_case , max_len=_snake_case , do_sample=_snake_case , temp=_snake_case , top_p=_snake_case , top_k=_snake_case , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
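# Sketch of the overall app flow (comment added; not in the original script; the original
# function names are shown, although in this dump every function is defined as `_snake_case`):
#   question -> make_support(question, source, method)   # retrieve the top-10 support passages
#   (question, passages) -> answer_question(...)         # BART seq2seq writes the long-form answer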
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
snake_case__ : Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
snake_case__ : Tuple = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case__ : List[Any] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case__ : str = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
snake_case__ : List[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
snake_case__ : Tuple = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
snake_case__ : List[Any] = action_list.index(action_st)
snake_case__ : List[str] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
snake_case__ : List[Any] = show_type == '''Show full text of passages'''
else:
snake_case__ : Tuple = 3
snake_case__ : List[Any] = True
snake_case__ : List[str] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
snake_case__ : str = '''
### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
snake_case__ : Union[str, Any] = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
snake_case__ : Union[str, Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
snake_case__ : List[Any] = '''wiki40b'''
snake_case__ : Union[str, Any] = '''dense'''
snake_case__ : int = '''beam'''
snake_case__ : str = 2
snake_case__ : Dict = 64
snake_case__ : List[str] = 256
snake_case__ : Dict = None
snake_case__ : List[str] = None
snake_case__ : List[str] = st.sidebar.checkbox('''Generation options''')
if generate_options:
snake_case__ : List[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode deterministically with
    **beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
snake_case__ : List[str] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
snake_case__ : List[str] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
snake_case__ : Optional[Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
snake_case__ : Dict = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case__ : int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
snake_case__ : int = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
snake_case__ : List[str] = None
# start main text
snake_case__ : str = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
snake_case__ : Union[str, Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case__ : Optional[Any] = st.text_input('''Enter your question here:''', '''''')
else:
snake_case__ : int = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case__ , snake_case__ : str = make_support(question, source=wiki_source, method='''dense''', n_results=10)
snake_case__ , snake_case__ : Tuple = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
snake_case__ : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case__ : List[str] = support_list[:10]
snake_case__ : int = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
snake_case__ , snake_case__ : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case__ , snake_case__ : List[str] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
snake_case__ : int = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
snake_case__ : List[Any] = res[1].strip()
if sec_titles == "":
snake_case__ : Tuple = '''[{}]({})'''.format(res[0], wiki_url)
else:
snake_case__ : Optional[int] = sec_titles.split(''' & ''')
snake_case__ : Optional[Any] = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
snake_case__ : int = find_nearest_training(question)
snake_case__ : List[Any] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
snake_case__ : Dict = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
snake_case__ : Any = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 60 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
_SCREAMING_SNAKE_CASE : int = KandinskyVaaInpaintPipeline
_SCREAMING_SNAKE_CASE : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_SCREAMING_SNAKE_CASE : Optional[Any] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_SCREAMING_SNAKE_CASE : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_SCREAMING_SNAKE_CASE : Optional[Any] = False
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1_00
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = {
'in_channels': 9,
            # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=_UpperCamelCase , )
lowerCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=0 ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCamelCase )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
lowerCAmelCase__ = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase__ = 0
if str(_UpperCamelCase ).startswith('mps' ):
lowerCAmelCase__ = torch.manual_seed(_UpperCamelCase )
else:
lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
lowerCAmelCase__ = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'cpu'
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**_UpperCamelCase )
lowerCAmelCase__ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase__ = np.ones((7_68, 7_68) , dtype=np.floataa )
lowerCAmelCase__ = 0
lowerCAmelCase__ = 'a hat'
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCamelCase )
lowerCAmelCase__ = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=_UpperCamelCase , mask_image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
| 122 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case : Any = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_55 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_UpperCamelCase )
lowerCAmelCase__ = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase__ = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
lowerCAmelCase__ = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
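# Illustrative usage sketch (added; assumes the standard `BaseImageProcessor.__call__` ->
# `preprocess` dispatch, and the class above carries an obfuscated name, so this is
# schematic rather than runnable as-is):
#   processor = <ImageProcessor>(size={"shortest_edge": 256})
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the default 224x224 center crop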
| 122 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with a really long name
lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
| 337 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = 'deta'
A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = backbone_config.pop('''model_type''' )
lowercase : Any = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = backbone_config
lowercase : Union[str, Any] = num_queries
lowercase : Any = max_position_embeddings
lowercase : int = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : Optional[Any] = decoder_ffn_dim
lowercase : Optional[int] = decoder_layers
lowercase : int = decoder_attention_heads
lowercase : Any = dropout
lowercase : int = attention_dropout
lowercase : Dict = activation_dropout
lowercase : int = activation_function
lowercase : Dict = init_std
lowercase : List[str] = init_xavier_std
lowercase : Optional[Any] = encoder_layerdrop
lowercase : Tuple = auxiliary_loss
lowercase : Tuple = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : Tuple = encoder_n_points
lowercase : Optional[int] = decoder_n_points
lowercase : Tuple = two_stage
lowercase : Optional[Any] = two_stage_num_proposals
lowercase : Union[str, Any] = with_box_refine
lowercase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Optional[Any] = class_cost
lowercase : str = bbox_cost
lowercase : List[Any] = giou_cost
# Loss coefficients
lowercase : Tuple = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : Dict = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
lowercase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
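# Sketch (added; assumes the standard `PretrainedConfig.attribute_map` behavior, with the
# DETA configuration class above appearing under an obfuscated name):
#   cfg = DetaConfig()
#   cfg.hidden_size           # resolved through attribute_map -> cfg.d_model == 256
#   cfg.num_attention_heads   # -> cfg.encoder_attention_heads == 8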
| 337 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Dict ) -> List[str]:
torch.manual_seed(0 )
__magic_name__ : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __lowerCAmelCase ( self : str ) -> Any:
__magic_name__ : Union[str, Any] = self.dummy_uncond_unet
__magic_name__ : str = KarrasVeScheduler()
__magic_name__ : List[Any] = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Dict = torch.manual_seed(0 )
__magic_name__ : int = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' ).images
__magic_name__ : Any = torch.manual_seed(0 )
__magic_name__ : str = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' , return_dict=_A )[0]
__magic_name__ : int = image[0, -3:, -3:, -1]
__magic_name__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> str:
__magic_name__ : Optional[int] = 'google/ncsnpp-celebahq-256'
__magic_name__ : List[str] = UNetaDModel.from_pretrained(_A )
__magic_name__ : int = KarrasVeScheduler()
__magic_name__ : str = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Any = torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = pipe(num_inference_steps=20 , generator=_A , output_type='numpy' ).images
__magic_name__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__magic_name__ : int = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 275 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 275 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__snake_case = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    '''simple docstring'''
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[F'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    # attach the normalization stats so the vocoder can de-normalize spectrograms
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 176 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 304 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class FSMTBleuScoreTests(unittest.TestCase):
    """simple docstring"""

    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ])
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
| 354 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """simple docstring"""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                F'determinant modular {req_l} of encryption key({det}) '
                F'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ''

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
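

# Illustrative check (not from the original file): with the 2x2 key [[2, 5], [1, 6]],
# the reference implementation encrypts "testing hill cipher" to "WHXYJOLM9C6XT085LL":
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     assert hc.encrypt("testing hill cipher") == "WHXYJOLM9C6XT085LL"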
def main() -> None:
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 227 | 0 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
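

# Illustrative examples (not from the original file): the pattern accepts an optional
# "+91"/"91"/"0" prefix followed by a 10-digit number starting with 7, 8 or 9, e.g.
#     indian_phone_validator("+918827897895")   # True
#     indian_phone_validator("+9136172894860")  # False (11 digits, wrong leading digit)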
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 109 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
| 109 | 1 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('''surface_area_cube() only accepts non-negative values''')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('''surface_area_cuboid() only accepts non-negative values''')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('''surface_area_sphere() only accepts non-negative values''')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('''surface_area_hemisphere() only accepts non-negative values''')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cone() only accepts non-negative values''')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cylinder() only accepts non-negative values''')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''')
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('''area_rectangle() only accepts non-negative values''')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('''area_square() only accepts non-negative values''')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('''area_triangle() only accepts non-negative values''')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('''Given three sides do not form a triangle''')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('''area_parallelogram() only accepts non-negative values''')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('''area_circle() only accepts non-negative values''')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('''area_ellipse() only accepts non-negative values''')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''')
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''')
    return (sides * length**2) / (4 * tan(pi / sides))
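

# Quick sanity checks (illustrative, not from the original file):
#     area_triangle_three_sides(3, 4, 5) == 6.0   # Heron's formula on a right triangle
#     surface_area_cube(2) == 24                  # 6 faces of side 2 -> 6 * 4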
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 39 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
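

# Note (added for clarity): the cipher is a repeating three-letter lowercase XOR key,
# so the brute-force search space is only 26**3 = 17,576 keys; filtering the surviving
# candidate decryptions by common English words isolates the real plaintext.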
if __name__ == "__main__":
print(F"{solution() = }")
| 39 | 1 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R'''| /\ | |- | |- |--| |\ /| |-''')
    print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))

    print('''Good Bye...''')
| 122 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122 | 1 |
def binary_or(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
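

# Illustrative examples (not from the original file):
#     binary_or(25, 32)  # '0b111001'  (0b011001 | 0b100000)
#     binary_or(0, 9)    # '0b1001'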
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ], )

    @require_torch
    def test_accepts_torch_device(self):
        '''simple docstring'''
        import torch

        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu"), )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        '''simple docstring'''
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        '''simple docstring'''
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        '''simple docstring'''
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        '''simple docstring'''
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
        self.assertTrue(outputs[1]["label"] in model.config.idalabel.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.idalabel.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, )
        self.assertTrue(outputs["label"] in model.config.idalabel.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
| 148 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = """mra"""

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 275 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(' ', '') for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 275 | 1 |
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
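

# Quick sanity check (illustrative, not from the original file): solution(8) == 21,
# since phi(2..8) = 1, 2, 2, 4, 2, 6, 4 and their sum is 21.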
if __name__ == "__main__":
print(F'''{solution() = }''')
| 81 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
            'emoji': True,
        },
    }
]

total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = F'''{line["duration"]:.4f}'''
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + F'''\n...\n```\n{err}'''
    print(F'''### {message}''')
else:
    message = 'No failed tests! 🤗'
    print(F'''## {message}''')
    payload.append(no_error_payload)

if os.environ.get('TEST_TYPE', '') != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }
        payload.append(action_button)
        date_report = {
            'type': 'context',
            'elements': [
                {
                    'type': 'plain_text',
                    'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
                }
            ],
        }
        payload.append(date_report)

    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']

    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
| 81 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def to_dict(self):
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 175 |
def bead_sort(sequence: list) -> list:
    """simple docstring"""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
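

# How it works (added for clarity): each number is a column of beads; one pass of the
# inner loop lets a bead "fall" from a taller rod onto a shorter one to its right, so
# after at most len(sequence) passes the rods are in non-decreasing order.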
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 227 | 0 |
from __future__ import annotations
__snake_case = "Muhammad Umer Farooq"
__snake_case = "MIT"
__snake_case = "1.0.0"
__snake_case = "Muhammad Umer Farooq"
__snake_case = "contact@muhammadumerfarooq.me"
__snake_case = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    '''simple docstring'''
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
__snake_case = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 369 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    '''simple docstring'''
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    '''simple docstring'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 169 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
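# The first regex below flags open() calls that pass neither an encoding nor a
# binary/write mode; the second flags print() calls that sit outside comments,
# string literals and docstrings.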
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """simple docstring"""

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a match for any open(...) call that omits an encoding or binary mode."""
        with open(filepath, encoding='utf-8' ) as input_file:
            regexp = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements(self, filepath: str):
        """Return a match for any print(...) call outside comments, strings or docstrings."""
        with open(filepath, encoding='utf-8' ) as input_file:
            regexp = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets' )
        dataset_files = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 39 |
from __future__ import annotations
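# Prime factorization by trial division: divide out each factor i while i * i <= n;
# any remainder greater than 1 is itself prime.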
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
'''simple docstring'''
from __future__ import annotations
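# Least-significant-digit (LSD) radix sort: bucket the integers by each base-RADIX
# digit, starting from the lowest digit and working upwards.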
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place, one base-RADIX digit at a time.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """simple docstring"""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, '''Weight can not be negative.''')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, '''Profit can not be negative.''')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, '''The length of profit and weight must be same.''')
| 52 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    '''Isolate the decimal part of number, rounded to digit_amount places when positive.'''
    if digit_amount > 0:
        return round(number - int(number) , digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 101 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor ):
a__ : Union[str, Any] = ["""pixel_values"""]
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
snake_case : int = size if size is not None else {"shortest_edge": 224}
snake_case : int = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
snake_case : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
snake_case : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name="crop_size" )
snake_case : Dict = do_resize
snake_case : Optional[int] = size
snake_case : int = resample
snake_case : Union[str, Any] = do_center_crop
snake_case : Dict = crop_size
snake_case : Dict = do_rescale
snake_case : Any = rescale_factor
snake_case : Tuple = do_normalize
snake_case : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case : Tuple = do_convert_rgb
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
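        # Resize so the shorter edge matches size["shortest_edge"] while keeping the
        # aspect ratio (default_to_square is disabled above).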
snake_case : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
snake_case : Tuple = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
snake_case : int = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = size if size is not None else self.size
snake_case : Dict = get_size_dict(SCREAMING_SNAKE_CASE , param_name="size" , default_to_square=SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = resample if resample is not None else self.resample
snake_case : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Optional[int] = crop_size if crop_size is not None else self.crop_size
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , param_name="crop_size" , default_to_square=SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
snake_case : Optional[int] = image_std if image_std is not None else self.image_std
snake_case : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case : Optional[int] = [convert_to_rgb(SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
snake_case : List[str] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
snake_case : Optional[Any] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
snake_case : int = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
snake_case : Optional[int] = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
snake_case : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
snake_case : Tuple = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 148 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """simple docstring"""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
| 150 |
"""simple docstring"""
import os
from pathlib import Path
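# JIT-compiles the custom multi-scale deformable attention CPU/CUDA kernels the
# first time they are needed (requires a working CUDA toolchain at runtime).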
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ], )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 150 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=12_8112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> List[Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 81 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : str = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
lowerCamelCase_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _A ( lowercase ):
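    # Walk the model-type -> image-processor-name mapping and import the class from
    # the matching model module; fall back to dummy objects when a dependency is missing.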
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
a =model_type_to_module_name(lowercase )
a =importlib.import_module(f'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(lowercase , lowercase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase , '''__name__''' , lowercase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
a =importlib.import_module('''transformers''' )
if hasattr(lowercase , lowercase ):
return getattr(lowercase , lowercase )
return None
def _A ( lowercase , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = False , **lowercase , ):
"""simple docstring"""
a =get_file_from_repo(
lowercase , lowercase , cache_dir=lowercase , force_download=lowercase , resume_download=lowercase , proxies=lowercase , use_auth_token=lowercase , revision=lowercase , local_files_only=lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(lowercase , encoding='''utf-8''' ) as reader:
return json.load(lowercase )
class __A :
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__A )
def SCREAMING_SNAKE_CASE ( cls , __A , **__A ) -> Dict:
a =kwargs.pop('''config''' , __A )
a =kwargs.pop('''trust_remote_code''' , __A )
a =True
a , a =ImageProcessingMixin.get_image_processor_dict(__A , **__A )
a =config_dict.get('''image_processor_type''' , __A )
a =None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
a =config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
a =config_dict.pop('''feature_extractor_type''' , __A )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
a =feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
a =config_dict['''auto_map''']['''AutoFeatureExtractor''']
a =feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__A , __A ):
a =AutoConfig.from_pretrained(__A , **__A )
# It could be in `config.image_processor_type``
a =getattr(__A , '''image_processor_type''' , __A )
if hasattr(__A , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
a =config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
a =image_processor_class_from_name(__A )
a =image_processor_auto_map is not None
a =image_processor_class is not None or type(__A ) in IMAGE_PROCESSOR_MAPPING
a =resolve_trust_remote_code(
__A , __A , __A , __A )
if has_remote_code and trust_remote_code:
a =get_class_from_dynamic_module(
__A , __A , **__A )
a =kwargs.pop('''code_revision''' , __A )
if os.path.isdir(__A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__A , **__A )
elif image_processor_class is not None:
return image_processor_class.from_dict(__A , **__A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__A ) in IMAGE_PROCESSOR_MAPPING:
a =IMAGE_PROCESSOR_MAPPING[type(__A )]
return image_processor_class.from_dict(__A , **__A )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def SCREAMING_SNAKE_CASE ( __A , __A ) -> Any:
IMAGE_PROCESSOR_MAPPING.register(__A , __A )
| 81 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a__ :
def __init__( self : Optional[int], lowerCAmelCase : List[str], lowerCAmelCase : List[str]=3, lowerCAmelCase : Tuple=32, lowerCAmelCase : Any=3, lowerCAmelCase : Union[str, Any]=10, lowerCAmelCase : Optional[int]=[8, 16, 32, 64], lowerCAmelCase : int=[1, 1, 2, 1], lowerCAmelCase : List[Any]=True, lowerCAmelCase : int=True, lowerCAmelCase : Optional[Any]="relu", lowerCAmelCase : int=3, lowerCAmelCase : Optional[int]=None, lowerCAmelCase : List[str]=["stage2", "stage3", "stage4"], lowerCAmelCase : Optional[Any]=[2, 3, 4], lowerCAmelCase : List[str]=1, ) -> int:
lowercase : Dict = parent
lowercase : Optional[int] = batch_size
lowercase : Tuple = image_size
lowercase : List[Any] = num_channels
lowercase : Tuple = embeddings_size
lowercase : List[str] = hidden_sizes
lowercase : Dict = depths
lowercase : int = is_training
lowercase : Union[str, Any] = use_labels
lowercase : Optional[Any] = hidden_act
lowercase : int = num_labels
lowercase : Optional[int] = scope
lowercase : List[Any] = len(_UpperCAmelCase )
lowercase : List[Any] = out_features
lowercase : str = out_indices
lowercase : Any = num_groups
def lowercase ( self : Tuple ) -> Union[str, Any]:
lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : List[Any] = None
if self.use_labels:
lowercase : List[Any] = ids_tensor([self.batch_size], self.num_labels )
lowercase : str = self.get_config()
return config, pixel_values, labels
def lowercase ( self : List[str] ) -> List[Any]:
return BitConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
def lowercase ( self : str, lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : List[str] ) -> str:
lowercase : List[str] = BitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase ( self : Tuple, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[str] ) -> Union[str, Any]:
lowercase : Dict = self.num_labels
lowercase : Optional[Any] = BitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase : str = model(_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[str], lowerCAmelCase : Optional[Any] ) -> str:
lowercase : Tuple = BitBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase : str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : List[str] = None
lowercase : Union[str, Any] = BitBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase : Tuple = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def lowercase ( self : int ) -> Dict:
lowercase : Any = self.prepare_config_and_inputs()
lowercase : str = config_and_inputs
lowercase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
_lowerCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def lowercase ( self : Optional[int] ) -> Optional[int]:
lowercase : List[Any] = BitModelTester(self )
lowercase : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase )
def lowercase ( self : Tuple ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : int ) -> Tuple:
return
@unittest.skip(reason='Bit does not output attentions' )
def lowercase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def lowercase ( self : Tuple ) -> Dict:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def lowercase ( self : str ) -> Dict:
pass
def lowercase ( self : List[str] ) -> str:
lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Tuple = model_class(_UpperCAmelCase )
lowercase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Dict = [*signature.parameters.keys()]
lowercase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
def lowercase ( self : Any ) -> int:
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase ( self : Tuple ) -> List[str]:
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def lowercase ( self : Optional[int] ) -> int:
lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any = model_class(config=_UpperCAmelCase )
for name, module in model.named_modules():
if isinstance(_UpperCAmelCase, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
def lowercase ( self : str ) -> int:
def check_hidden_states_output(lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : Tuple ):
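            # helper: run a forward pass and expect one hidden state per stage plus the
            # initial embeddings (hence expected_num_stages + 1 below).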
lowercase : List[str] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : Any = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ), expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : str = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase : Tuple = layer_type
lowercase : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Union[str, Any] = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def lowercase ( self : List[str] ) -> Tuple:
pass
def lowercase ( self : Optional[int] ) -> str:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase ( self : Dict ) -> Any:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = BitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def lowercase__ ( ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ) -> Tuple:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowercase ( self : List[Any] ) -> Dict:
lowercase : int = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase )
lowercase : Optional[int] = self.default_image_processor
lowercase : Optional[int] = prepare_img()
lowercase : Union[str, Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase : int = model(**_UpperCAmelCase )
# verify the logits
lowercase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, _UpperCAmelCase )
lowercase : List[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _UpperCAmelCase, atol=1e-4 ) )
@require_torch
class a__ ( BackboneTesterMixin, unittest.TestCase ):
_lowerCamelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCamelCase = BitConfig
_lowerCamelCase = False
def lowercase ( self : List[Any] ) -> Union[str, Any]:
lowercase : Tuple = BitModelTester(self )
| 352 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
_lowerCamelCase = StableDiffusionInstructPixaPixPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : str ) -> str:
torch.manual_seed(0 )
lowercase : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
lowercase : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowercase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
lowercase : Optional[int] = CLIPTextModel(lowerCAmelCase )
lowercase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : int, lowerCAmelCase : str, lowerCAmelCase : Tuple=0 ) -> Tuple:
lowercase : Optional[int] = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowercase : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase : Any = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
lowercase : str = torch.manual_seed(lowerCAmelCase )
else:
lowercase : Optional[Any] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> Optional[Any]:
lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : int = self.get_dummy_components()
lowercase : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe(**lowerCAmelCase ).images
lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : str = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> int:
lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : Union[str, Any] = self.get_dummy_components()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Any = 'french fries'
lowercase : Tuple = sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase )
lowercase : Optional[Any] = output.images
lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Dict = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> str:
lowercase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : str = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Any = self.get_dummy_inputs(lowerCAmelCase )
lowercase : int = [inputs['prompt']] * 2
lowercase : Dict = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
lowercase : Optional[int] = torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase )
lowercase : List[Any] = image / 2 + 0.5
lowercase : List[str] = image.permute(0, 3, 1, 2 )
lowercase : List[str] = image.repeat(2, 1, 1, 1 )
lowercase : Dict = sd_pipe(**lowerCAmelCase ).images
lowercase : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase : Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Union[str, Any] ) -> Any:
lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] = self.get_dummy_components()
lowercase : str = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear' )
lowercase : Tuple = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : str = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Dict = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Dict = sd_pipe(**lowerCAmelCase ).images
lowercase : Tuple = image[0, -3:, -3:, -1]
        slice = [round(x, 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase : List[str] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : int ) -> Optional[Any]:
lowercase : List[Any] = self.get_dummy_components()
lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : List[str] = VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase )
lowercase : int = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Dict = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='pt' ) )[0]
lowercase : Optional[Any] = components['vae']
lowercase : str = self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase : Optional[int] = pipe(**lowerCAmelCase )[0]
lowercase : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase, 1e-4, 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowercase ( self : List[str] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str], lowerCAmelCase : int=0 ) -> str:
lowercase : Dict = torch.manual_seed(lowerCAmelCase )
lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
lowercase : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : int ) -> str:
lowercase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Optional[Any] = self.get_inputs()
lowercase : List[Any] = pipe(**lowerCAmelCase ).images
lowercase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : List[str] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
lowercase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : List[Any] = self.get_inputs()
lowercase : Tuple = pipe(**lowerCAmelCase ).images
lowercase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : str = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> List[Any]:
lowercase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
lowercase : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Any = self.get_inputs()
lowercase : Union[str, Any] = pipe(**lowerCAmelCase ).images
lowercase : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : int = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> Dict:
lowercase : Dict = 0
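        # the pipeline should invoke the callback once per inference step; the final
        # count is asserted after the run.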
def callback_fn(lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : torch.FloatTensor ) -> None:
lowercase : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Union[str, Any] = latents[0, -3:, -3:, -1]
lowercase : List[str] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Union[str, Any] = latents[0, -3:, -3:, -1]
lowercase : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase : Union[str, Any] = False
lowercase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowercase : Optional[int] = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowercase : Dict = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : Dict = self.get_inputs()
lowercase : List[Any] = pipe(**lowerCAmelCase )
lowercase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase ( self : Union[str, Any] ) -> Tuple:
lowercase : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase : Optional[Any] = inputs['image'].resize((504, 504) )
lowercase : Union[str, Any] = 'timbrooks/instruct-pix2pix'
lowercase : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase, safety_checker=lowerCAmelCase, )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : str = pipe(**lowerCAmelCase )
lowercase : int = output.images[0]
lowercase : int = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase : Union[str, Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 53 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Return the set of nodes reachable from start, visited depth-first."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = """markuplm"""

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> int:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 169 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
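# An a x b grid contains T(a) * T(b) rectangles, where T(k) is the k-th triangle
# number; the search below returns the area a * b whose rectangle count lies
# closest to `target` (this matches Project Euler problem 85).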
def solution(target: int = 2_000_000) -> int:
    """Return the area of the grid whose rectangle count is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 352 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
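# parse_unknown_args pairs up "--flag value" tokens into a kwargs dict for the
# selected subcommand; main registers the subcommands and dispatches to args.func.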
def parse_unknown_args(unknown_args):
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 58 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
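# The conversion below renames PyTorch parameter paths onto the original TF
# checkpoint naming scheme and transposes linear weights (TF stores them as
# [in, out] "kernel"s).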
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
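# Added illustration: a self-contained sketch of the PyTorch -> TF variable-name
# mapping performed by `to_tf_var_name` above, using a subset of the same patterns.
_demo_patterns = (("layer.", "layer_"), (".", "/"), ("weight", "kernel"))


def _demo_to_tf_name(name: str) -> str:
    for patt, repl in _demo_patterns:
        name = name.replace(patt, repl)
    return f"bert/{name}"


assert _demo_to_tf_name("encoder.layer.0.output.dense.weight") == "bert/encoder/layer_0/output/dense/kernel"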
if __name__ == "__main__":
main()
| 9 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
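# Added illustration: casing, punctuation and articles are normalized away before
# the exact-match comparison, so these two strings count as a match.
assert compute_em(predictions=["The cat sat."], references=[["the cat sat"]]) == 100.0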
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
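# Added illustration: a tiny worked example of SARIngram at the unigram level.
# With source "a b c", prediction "a b d" and one reference "a b e": "a" and "b"
# are correctly kept, "c" is a deletion the reference also made, and the added
# "d" is not supported by the reference, so the addition score is 0.
assert SARIngram(["a", "b", "c"], ["a", "b", "d"], [["a", "b", "e"]], 1) == (1.0, 1.0, 0)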
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
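# Shape note (added): sacrebleu's corpus_bleu expects the references transposed to
# one list per reference *position*, which is what the list comprehension above does:
#   [["ref1 of pred1", "ref2 of pred1"], ["ref1 of pred2", "ref2 of pred2"]]
#   -> [["ref1 of pred1", "ref1 of pred2"], ["ref2 of pred1", "ref2 of pred2"]]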
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
return result
| 52 | 0 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
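# Hypothetical usage sketch: framework-specific subclasses (which provide `is_tpu`
# and the other runtime properties) are what get instantiated in practice, e.g.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.model_names)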
| 364 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to torch.float16, overwriting `src_path` unless `save_path` is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
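# Example invocation through fire (hypothetical paths):
#   python fp16_convert.py /models/pytorch_model.bin --save_path /models/pytorch_model_fp16.bin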
if __name__ == "__main__":
fire.Fire(convert)
| 156 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 150 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the path from top-left to bottom-right of a grid of numbers with the
    lowest possible sum, moving only right or down, and return that sum.
    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
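# Added illustration: in a 2x2 grid the two candidate paths cost 1+2+4=7 and
# 1+3+4=8, so the minimum is 7.
assert min_path_sum([[1, 2], [3, 4]]) == 7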
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 | 1 |
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps triangle `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 254 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
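# Hypothetical usage sketch (downloads the checkpoint on first use):
#   tool = TextClassificationTool()
#   tool("This movie was a masterpiece.", labels=["positive", "negative"])  # -> "positive"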
| 254 | 1 |
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # the dataset_name field should be kept in the dict dump, even when it is None
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 53 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
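# Usage note (added): `W_query` and `W_supports` are expected to be tokenizer
# outputs (dicts of tensors); the supports batch additionally carries "sizes",
# "start_token_id" and "end_token_id" entries, which `forward` consumes and
# deletes before running BERT on the remaining keys.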
| 353 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # match `open(...)` calls that do not pass an explicit encoding and are not binary-mode opens
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 298 | 0 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
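# Added illustration: the proper divisors of 6 are 1, 2 and 3, which sum to 6
# (6 is a perfect number).
assert sum_of_divisors(6) == 6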
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
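# Added illustration: the classic "blinker" oscillator. A vertical line of three
# live cells flips to a horizontal line after one generation. The blinker is kept
# away from the border because `run` uses plain numpy slicing for the
# neighbourhood, so edge cells see a truncated neighbourhood.
_blinker = create_canvas(5)
_blinker[1][2] = _blinker[2][2] = _blinker[3][2] = True
_next = run(_blinker)
assert [_next[2][1], _next[2][2], _next[2][3]] == [True, True, True]
assert not _next[1][2] and not _next[3][2]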
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 58 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 360 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
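# Hypothetical usage sketch (requires downloading pretrained weights):
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]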
| 124 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 33 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
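# Example invocation (hypothetical paths):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin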
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 156 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
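# Illustration of the URL layout asserted above, with made-up values; `quote`
# percent-encodes the blanks in the filename:
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"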
| 370 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """simple docstring"""

    sample: torch.FloatTensor


class UNetaDModel(ModelMixin, ConfigMixin):
    """simple docstring"""
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: Optional[str] = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: Optional[str] = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ) -> None:
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNetaDOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample)
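# Minimal forward-pass sketch for the model above, kept as comments because this
# module uses relative imports. The config values are illustrative assumptions:
# extra_in_channels=16 matches the 16 Fourier time-embedding channels that the
# first (no-skip) down block concatenates, and the length 2048 is chosen so the
# two downsampling blocks divide it evenly.
#   model = UNetaDModel(sample_size=2048, extra_in_channels=16)
#   noisy_sample = torch.randn(1, 2, 2048)   # (batch, in_channels, length)
#   timestep = torch.tensor([10])
#   output = model(noisy_sample, timestep).sample  # same shape as noisy_sample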
| 89 | 0 |