| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "gpt_neox"
def __init__( self ,_A=5_0432 ,_A=6144 ,_A=44 ,_A=64 ,_A=2_4576 ,_A="gelu" ,_A=0.2_5 ,_A=1_0000 ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A=2048 ,_A=0.0_2 ,_A=1E-5 ,_A=True ,_A=0 ,_A=2 ,_A=False ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(bos_token_id=_A ,eos_token_id=_A ,**_A )
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Tuple = rotary_pct
_lowerCAmelCase : Optional[Any] = rotary_emb_base
_lowerCAmelCase : str = attention_dropout
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : List[Any] = classifier_dropout
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : int = tie_word_embeddings
_lowerCAmelCase : List[Any] = use_parallel_residual
_lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_A ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"""got {self.rope_scaling}""" )
_lowerCAmelCase : int = self.rope_scaling.get('type' ,_A )
_lowerCAmelCase : Any = self.rope_scaling.get('factor' ,_A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A ,_A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
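A standalone sketch of the special-token layout implemented above (token IDs are illustrative, not the real vocabulary values): unlike BERT, XLNet appends `<sep>` and `<cls>` at the end of the sequence, and a sentence pair gets a separator between the two segments.

SEP_ID, CLS_ID = 4, 3  # illustrative IDs only

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    sep, cls = [SEP_ID], [CLS_ID]
    if ids_b is None:
        return ids_a + sep + cls
    return ids_a + sep + ids_b + sep + cls

assert build_inputs_with_special_tokens([10, 11]) == [10, 11, 4, 3]
assert build_inputs_with_special_tokens([10], [20]) == [10, 4, 20, 4, 3]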
| 16 | 1 |
"""simple docstring"""
_lowerCAmelCase = range(2, 2_0 + 1)
_lowerCAmelCase = [1_0**k for k in range(ks[-1] + 1)]
_lowerCAmelCase = {}
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = sum(a_i[j] for j in range(_lowerCamelCase , len(_lowerCamelCase ) ) )
_lowerCAmelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCamelCase ) , _lowerCamelCase ) ) )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = 0, 0
_lowerCAmelCase : int = n - i
_lowerCAmelCase : List[str] = memo.get(_lowerCamelCase )
if sub_memo is not None:
_lowerCAmelCase : str = sub_memo.get(_lowerCamelCase )
if jumps is not None and len(_lowerCamelCase ) > 0:
# find and make the largest jump without going over
_lowerCAmelCase : str = -1
for _k in range(len(_lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCAmelCase : Optional[int] = _k
break
if max_jump >= 0:
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCAmelCase : str = diff + c
for j in range(min(_lowerCamelCase , len(_lowerCamelCase ) ) ):
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = divmod(_lowerCamelCase , 10 )
if new_c > 0:
add(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
_lowerCAmelCase : str = []
else:
_lowerCAmelCase : str = {c: []}
_lowerCAmelCase : Optional[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCAmelCase, _lowerCAmelCase : str = next_term(_lowerCamelCase , k - 1 , i + dn , _lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCAmelCase, _lowerCAmelCase : str = compute(_lowerCamelCase , _lowerCamelCase , i + dn , _lowerCamelCase )
diff += _diff
dn += terms_jumped
_lowerCAmelCase : Dict = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCAmelCase : Optional[int] = 0
while j < len(_lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(_lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCAmelCase : Tuple = i
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = 0, 0, 0
for j in range(len(_lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCAmelCase : Optional[Any] = ds_c + ds_b
diff += addend
_lowerCAmelCase : int = 0
for j in range(_lowerCamelCase ):
_lowerCAmelCase : str = a_i[j] + addend
_lowerCAmelCase, _lowerCAmelCase : Tuple = divmod(_lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return diff, i - start_i
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
_lowerCAmelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCAmelCase, _lowerCAmelCase : Tuple = divmod(_lowerCamelCase , 10 )
_lowerCAmelCase : Dict = addend // 10 + quotient
else:
_lowerCAmelCase : Optional[Any] = s
_lowerCAmelCase : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCAmelCase, _lowerCAmelCase : int = divmod(_lowerCamelCase , 10 )
digits.append(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase = 10**15 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [1]
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[int] = 0
while True:
_lowerCAmelCase, _lowerCAmelCase : int = next_term(_lowerCamelCase , 20 , i + dn , _lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCAmelCase : Tuple = 0
for j in range(len(_lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
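The snippet above computes a(10**15) of the digit-sum recurrence a(1) = 1, a(n+1) = a(n) + digitsum(a(n)) by caching "jumps" over many terms. A naive reference implementation, far too slow for the real target but useful for sanity-checking small n:

def naive(n: int) -> int:
    # Walk the recurrence one term at a time.
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

assert [naive(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]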
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 zero-initialized integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
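Two pieces of the hash above, extracted as standalone sketches: the 32-bit right-rotation used throughout the compression loop, and the message padding rule (append 0x80, zero-pad so the length is 56 mod 64, then append the bit length as a big-endian 64-bit integer).

import struct

def ror(value: int, rotations: int) -> int:
    # 32-bit rotate right.
    return 0xFFFF_FFFF & ((value << (32 - rotations)) | (value >> rotations))

def pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)

assert ror(0x0000_0001, 1) == 0x8000_0000
assert len(pad(b"abc")) % 64 == 0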
| 16 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_lowerCAmelCase = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_lowerCAmelCase = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_lowerCAmelCase : Tuple = bs[:]
_lowerCAmelCase : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
_lowerCAmelCase : int = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = set()
_lowerCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : List[Any] = char
return pairs
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A ,_A="replace" ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A=False ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else bos_token
_lowerCAmelCase : str = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else eos_token
_lowerCAmelCase : Optional[int] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else sep_token
_lowerCAmelCase : Dict = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else cls_token
_lowerCAmelCase : List[str] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else unk_token
_lowerCAmelCase : str = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase : Dict = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
super().__init__(
errors=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,add_prefix_space=_A ,**_A ,)
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : List[Any] = json.load(_A )
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Any = errors # how to handle errors in decoding
_lowerCAmelCase : str = bytes_to_unicode()
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : List[Any] = merges_handle.read().split('\n' )[1:-1]
_lowerCAmelCase : Any = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Dict = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : List[str] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Any = tuple(_A )
_lowerCAmelCase : Any = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : str = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : int = bigram
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : List[Any] = 0
while i < len(_A ):
try:
_lowerCAmelCase : Optional[int] = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[int] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Dict = tuple(_A )
_lowerCAmelCase : Optional[Any] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : Optional[Any] = get_pairs(_A )
_lowerCAmelCase : Optional[int] = ' '.join(_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for token in re.findall(self.pat ,_A ):
_lowerCAmelCase : str = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(' ' ) )
return bpe_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.decoder.get(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = ''.join(_A )
_lowerCAmelCase : int = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Tuple = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : Union[str, Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : List[str] = 0
with open(_A ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : List[str] = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self ,_A ,_A=False ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
_lowerCAmelCase : List[str] = ' ' + text
return (text, kwargs)
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_A )
_lowerCAmelCase : str = ' '.join(_A )
_lowerCAmelCase : Optional[int] = self.encode(_A )
if len(_A ) > self.model_max_length:
_lowerCAmelCase : Optional[int] = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
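A standalone sketch of the BPE pair-extraction helper defined near the top of the snippet (its name is obfuscated above): collect every pair of adjacent symbols in a word; the merge loop then picks the pair with the lowest rank in `bpe_ranks`.

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}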
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores the function used to score items; heap ordering is based on this score.
_lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure the heap is valid in both up and down directions.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure the heap is valid in both up and down directions. Ideally only one
# of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
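The heap above is array-backed, so parent/child relations are pure index arithmetic. A standalone sketch of that arithmetic:

def parent(i: int):
    return (i - 1) // 2 if i > 0 else None

def left(i: int, size: int):
    l = 2 * i + 1
    return l if l < size else None

def right(i: int, size: int):
    r = 2 * i + 2
    return r if r < size else None

assert parent(0) is None and parent(4) == 1
assert left(1, 7) == 3 and right(1, 7) == 4 and left(3, 7) is None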
| 16 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
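What the `pad_across_processes` checks above verify, sketched in plain PyTorch (the function name and semantics are taken from the test; this is not a re-implementation of accelerate's real API): each rank's tensor is zero-padded along dim 0 to a common size, at the end by default or at the front with `pad_first=True`.

import torch

def pad_to(tensor: torch.Tensor, dim0: int, pad_first: bool = False) -> torch.Tensor:
    # Zero-pad along dim 0 up to the requested size.
    pad = torch.zeros(dim0 - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.ones(2, 3)
assert pad_to(t, 4).shape == (4, 3) and torch.all(pad_to(t, 4)[2:] == 0)
assert torch.all(pad_to(t, 4, pad_first=True)[:2] == 0)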
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
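A standalone sketch of the additive causal mask built in `__init__` above: every position above the diagonal gets a large negative value (-10000.0) so softmax zeroes out attention to future tokens, while allowed positions stay at 0.

import torch

n = 4
mask = torch.full((n, n), -10000.0).triu_(1)  # strictly-upper triangle keeps -1e4
assert mask[0, 1] == -10000.0                 # attending forward is blocked
assert mask[1, 0] == 0.0 and mask[2, 2] == 0.0  # past and self are allowed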
| 16 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location='cpu' )
if "model" in sd.keys():
_lowerCAmelCase : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )['model']
# pop unnecessary weights
_lowerCAmelCase : List[Any] = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCamelCase )
_lowerCAmelCase : Any = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCAmelCase : Tuple = sd.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCAmelCase : int = sd[key]
# We split the fused QKV into separate Q, K, V
_lowerCAmelCase : List[Any] = key.replace('.qkv_proj.' , '.q_proj.' )
_lowerCAmelCase : Optional[int] = key.replace('.qkv_proj.' , '.k_proj.' )
_lowerCAmelCase : Any = key.replace('.qkv_proj.' , '.v_proj.' )
_lowerCAmelCase : Optional[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated in K, V, Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = torch.split(_lowerCamelCase , depth // 3 , dim=0 )
_lowerCAmelCase : int = q
_lowerCAmelCase : List[Any] = k
_lowerCAmelCase : Optional[int] = v
del sd[key]
return sd
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = load_checkpoint(_lowerCamelCase )
if config is not None:
_lowerCAmelCase : str = OPTConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = OPTConfig()
_lowerCAmelCase : Dict = OPTModel(_lowerCamelCase ).half().eval()
model.load_state_dict(_lowerCamelCase )
# Check results
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
_lowerCAmelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
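A minimal sketch of the fused-QKV split performed in the checkpoint loader above: a weight of shape [3*d, in] is cut along dim 0 into three equal [d, in] blocks (the shapes here are illustrative).

import torch

fused = torch.arange(18.0).reshape(6, 3)         # pretend 3*d = 6
depth = fused.shape[0]
assert depth % 3 == 0
q, k, v = torch.split(fused, depth // 3, dim=0)  # three (2, 3) blocks
assert q.shape == k.shape == v.shape == (2, 3)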
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
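The dtype dispatch in the tensorize method above (its name is obfuscated in the snippet) routes integer and floating numpy values to different default jnp dtypes; the routing test itself is plain numpy and can be sketched without jax:

import numpy as np

assert np.issubdtype(np.array([1, 2]).dtype, np.integer)    # routed to the int default
assert np.issubdtype(np.array([1.0]).dtype, np.floating)    # routed to the float default
assert not np.issubdtype(np.array(["a"]).dtype, np.number)  # strings pass through unchanged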
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(_lowerCamelCase ):
if len(_lowerCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_lowerCamelCase ) )
return data_lists
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list[float]] = []
for dlist, weight in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = min(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = max(_lowerCamelCase )
_lowerCAmelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCAmelCase : Any = f"""Invalid weight of {weight:f} provided"""
raise ValueError(_lowerCamelCase )
score_lists.append(_lowerCamelCase )
return score_lists
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = final_scores[j] + ele
return final_scores
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = get_data(_lowerCamelCase )
_lowerCAmelCase : str = calculate_each_score(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = generate_final_scores(_lowerCamelCase )
# append scores to source data
for i, ele in enumerate(_lowerCamelCase ):
source_data[i].append(_lowerCamelCase )
return source_data
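A standalone sketch of the per-column scoring rule above: values are min-max normalised within a column; weight 1 rewards larger values, weight 0 inverts the score so smaller values win (the fallback mirrors the snippet's ZeroDivisionError handling for a constant column).

def score_column(column, weight):
    lo, hi = min(column), max(column)
    if hi == lo:  # constant column: ZeroDivisionError fallback
        return [1.0 if weight == 0 else 0.0 for _ in column]
    norm = [(x - lo) / (hi - lo) for x in column]
    return [1 - s for s in norm] if weight == 0 else norm

assert score_column([20, 10], 1) == [1.0, 0.0]
assert score_column([20, 10], 0) == [0.0, 1.0]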
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )  # iterate `_`, the generator variable, not an unrelated name
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
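# Editor's sketch (hypothetical names): the static method above is spherical
# linear interpolation (slerp) between two tensors, used to morph smoothly
# between two noise samples.
import torch
from math import acos, sin

def slerp_sketch(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened tensors.
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    # Combination along the great circle; alpha=0 returns x0, alpha=1 returns x1.
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)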
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "audio-spectrogram-transformer"
def __init__( self ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1E-12 ,_A=16 ,_A=True ,_A=10 ,_A=10 ,_A=1024 ,_A=128 ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Optional[Any] = qkv_bias
_lowerCAmelCase : List[str] = frequency_stride
_lowerCAmelCase : Tuple = time_stride
_lowerCAmelCase : Any = max_length
_lowerCAmelCase : Any = num_mel_bins
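# Editor's sketch: how the patch grid follows from the defaults stored above.
# This mirrors the sliding-window arithmetic of the AST embedding layer (an
# assumption inferred from the patch size and strides kept in this config).
def patch_grid(num_mel_bins=128, max_length=1024, patch_size=16,
               frequency_stride=10, time_stride=10):
    freq_dim = (num_mel_bins - patch_size) // frequency_stride + 1
    time_dim = (max_length - patch_size) // time_stride + 1
    return freq_dim, time_dim, freq_dim * time_dim

assert patch_grid() == (12, 101, 1212)  # 1212 patches for the default config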
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
' Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
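# Editor's sketch (hypothetical names): the core of the `bpe` method above,
# stripped of caching and the </w>/@@ bookkeeping. Repeatedly merge the
# lowest-ranked adjacent pair until no known merge remains.
def get_pairs_sketch(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe_sketch(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        bigram = min(get_pairs_sketch(word), key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break  # no mergeable pair left
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)  # apply the merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

assert bpe_sketch(("l", "o", "w", "</w>"), {("l", "o"): 0, ("lo", "w"): 1}) == ("low", "</w>")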
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
return array
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = min(_lowerCamelCase ), max(_lowerCamelCase )
# Compute the variables
_lowerCAmelCase : Union[str, Any] = _max - _min + 1
_lowerCAmelCase, _lowerCAmelCase : Any = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
_lowerCAmelCase : Tuple = i - _min
_lowerCAmelCase : Tuple = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
_lowerCAmelCase : int = 0
for i in range(_lowerCamelCase ):
while holes_repeat[i] > 0:
_lowerCAmelCase : Union[str, Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("""Enter numbers separated by comma:\n""")
_lowerCAmelCase = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
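# Editor's sketch (hypothetical name): a compact restatement of the pigeonhole
# idea above for small integer ranges — count occurrences per hole, then
# replay the holes in order. Runs in O(n + range) time.
def pigeon_sort_sketch(array):
    if not array:
        return array
    lo, hi = min(array), max(array)
    counts = [0] * (hi - lo + 1)
    for value in array:
        counts[value - lo] += 1
    result = []
    for offset, count in enumerate(counts):
        result.extend([lo + offset] * count)
    return result

assert pigeon_sort_sketch([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8]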
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_1(_A )  # the linear branch uses its own projection, distinct from the gate's `wi_a`
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
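# Editor's sketch (hypothetical class name): the FiLM layer above in
# isolation. A conditioning embedding predicts a per-feature scale and shift,
# applied as x * (1 + scale) + shift.
import torch
from torch import nn

class FiLMSketch(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLMSketch(cond_features=8, out_features=4)
assert film(torch.randn(2, 4), torch.randn(2, 8)).shape == (2, 4)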
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def update_area_of_max_square(_lowerCamelCase , _lowerCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_lowerCAmelCase : int = update_area_of_max_square(_lowerCamelCase , col + 1 )
_lowerCAmelCase : List[str] = update_area_of_max_square(row + 1 , col + 1 )
_lowerCAmelCase : List[Any] = update_area_of_max_square(row + 1 , _lowerCamelCase )
if mat[row][col]:
_lowerCAmelCase : Any = 1 + min([right, diagonal, down] )
_lowerCAmelCase : List[Any] = max(largest_square_area[0] , _lowerCamelCase )
return sub_problem_sol
else:
return 0
_lowerCAmelCase : Union[str, Any] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
_lowerCAmelCase : List[str] = update_area_of_max_square_using_dp_array(_lowerCamelCase , col + 1 , _lowerCamelCase )
_lowerCAmelCase : List[str] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , _lowerCamelCase , _lowerCamelCase )
if mat[row][col]:
_lowerCAmelCase : int = 1 + min([right, diagonal, down] )
_lowerCAmelCase : Any = max(largest_square_area[0] , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
_lowerCAmelCase : int = [0]
_lowerCAmelCase : Tuple = [[-1] * cols for _ in range(_lowerCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _lowerCamelCase )
return largest_square_area[0]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [[0] * (cols + 1) for _ in range(rows + 1 )]
_lowerCAmelCase : Any = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_lowerCAmelCase : str = dp_array[row][col + 1]
_lowerCAmelCase : int = dp_array[row + 1][col + 1]
_lowerCAmelCase : Dict = dp_array[row + 1][col]
if mat[row][col] == 1:
_lowerCAmelCase : Tuple = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = max(dp_array[row][col] , _lowerCamelCase )
else:
_lowerCAmelCase : List[str] = 0
return largest_square_area
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [0] * (cols + 1)
_lowerCAmelCase : List[Any] = [0] * (cols + 1)
_lowerCAmelCase : str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_lowerCAmelCase : Optional[Any] = current_row[col + 1]
_lowerCAmelCase : int = next_row[col + 1]
_lowerCAmelCase : Union[str, Any] = next_row[col]
if mat[row][col] == 1:
_lowerCAmelCase : Optional[int] = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = max(current_row[col] , _lowerCamelCase )
else:
_lowerCAmelCase : str = 0
_lowerCAmelCase : Tuple = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
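# Editor's sketch (hypothetical name): the single recurrence all four variants
# above share. If mat[r][c] == 1, the largest square anchored at (r, c)
# extends the minimum of its right, down, and diagonal neighbours by one.
# Note the functions above return the largest side length, not the area.
def largest_square_side(mat):
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c]:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c], dp[r + 1][c + 1])
                best = max(best, dp[r][c])
    return best  # side length; the area would be best ** 2

assert largest_square_side([[1, 1], [1, 1]]) == 2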
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
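# Editor's sketch: the spatial arithmetic asserted by the tests above. ResNet
# downsamples by 4x after the stem/first stage and by 32x overall, which is
# why the checks divide image_size by 4 and 32 respectively.
def resnet_feature_sizes(image_size=32):
    return image_size // 4, image_size // 32  # first-stage map, final map

assert resnet_feature_sizes(32) == (8, 1)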
| 16 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=_A ,speech_processor=_A ,vae=_A ,text_encoder=_A ,tokenizer=_A ,unet=_A ,scheduler=_A ,feature_extractor=_A ,)
def __lowerCamelCase ( self ,_A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
_lowerCAmelCase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self ,_A ,_A=1_6000 ,_A = 512 ,_A = 512 ,_A = 50 ,_A = 7.5 ,_A = None ,_A = 1 ,_A = 0.0 ,_A = None ,_A = None ,_A = "pil" ,_A = True ,_A = None ,_A = 1 ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
_A ,return_tensors='pt' ,sampling_rate=_A ).input_features.to(self.device )
_lowerCAmelCase : List[str] = self.speech_model.generate(_A ,max_length=48_0000 )
_lowerCAmelCase : Optional[Any] = self.speech_processor.tokenizer.batch_decode(_A ,skip_special_tokens=_A ,normalize=_A )[
0
]
if isinstance(_A ,_A ):
_lowerCAmelCase : str = 1
elif isinstance(_A ,_A ):
_lowerCAmelCase : Tuple = len(_A )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A ,_A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_A )}.""" )
# get prompt text embeddings
_lowerCAmelCase : Optional[int] = self.tokenizer(
_A ,padding='max_length' ,max_length=self.tokenizer.model_max_length ,return_tensors='pt' ,)
_lowerCAmelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_lowerCAmelCase : Any = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = text_embeddings.shape
_lowerCAmelCase : Any = text_embeddings.repeat(1 ,_A ,1 )
_lowerCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt ,_A ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[str]  # forward declaration; the negative-prompt strings are assigned in the branches below
if negative_prompt is None:
_lowerCAmelCase : str = [''] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
F""" {type(_A )}.""" )
elif isinstance(_A ,_A ):
_lowerCAmelCase : int = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
_lowerCAmelCase : Dict = negative_prompt
_lowerCAmelCase : str = text_input_ids.shape[-1]
_lowerCAmelCase : str = self.tokenizer(
_A ,padding='max_length' ,max_length=_A ,truncation=_A ,return_tensors='pt' ,)
_lowerCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase : Any = uncond_embeddings.shape[1]
_lowerCAmelCase : Any = uncond_embeddings.repeat(1 ,_A ,1 )
_lowerCAmelCase : str = uncond_embeddings.view(batch_size * num_images_per_prompt ,_A ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase : int = torch.randn(_A ,generator=_A ,device='cpu' ,dtype=_A ).to(
self.device )
else:
_lowerCAmelCase : Tuple = torch.randn(_A ,generator=_A ,device=self.device ,dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase : int = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : str = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : int = {}
if accepts_eta:
_lowerCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Tuple = self.scheduler.scale_model_input(_A ,_A )
# predict the noise residual
_lowerCAmelCase : str = self.unet(_A ,_A ,encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase, _lowerCAmelCase : Any = noise_pred.chunk(2 )
_lowerCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Optional[int] = self.scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A ,_A ,_A )
_lowerCAmelCase : Any = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase : Union[str, Any] = self.vae.decode(_A ).sample
_lowerCAmelCase : List[str] = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : Any = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A ,nsfw_content_detected=_A )
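# Editor's sketch: the classifier-free guidance update applied above, in
# isolation. The UNet is run on a doubled batch (unconditional + conditional)
# and the two predictions are extrapolated by `guidance_scale`.
import torch

def apply_cfg(noise_pred_uncond, noise_pred_text, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# guidance_scale = 1.0 reproduces the conditional prediction exactly.
u, t = torch.zeros(1, 4), torch.ones(1, 4)
assert torch.equal(apply_cfg(u, t, 1.0), t)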
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
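# Editor's sketch (using numpy, an assumption; the solver above uses its own
# Gaussian elimination): fit OP(k, n) through the first k terms of u(n) and
# evaluate just past the fitted range to find the first incorrect term (FIT).
import numpy as np

u = [n**3 for n in range(1, 6)]              # u(n) = n^3
op2 = np.polyfit(range(1, 3), u[:2], deg=1)  # OP(2, n) from the first two terms
assert round(np.polyval(op2, 3)) == 15       # FIT for OP(2, n), as in the problem statement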
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
_lowerCAmelCase = 1.6_021E-19 # units = C
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
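# Editor's sketch: the single relation the branching above encodes is
# sigma = q * n * mu (conductivity = charge * electron concentration *
# mobility); each branch simply solves it for the missing quantity.
ELECTRON_CHARGE_SKETCH = 1.6021e-19  # C

def conductivity_from(electron_conc, mobility):
    return ELECTRON_CHARGE_SKETCH * electron_conc * mobility

print(conductivity_from(1e18, 0.05))  # ~8.01e-03 (units depend on the inputs' units)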
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
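# Editor's sketch (hypothetical names): the core of add_sub_symbol above,
# simplified — the original additionally checks that each span is CJK. Given
# BERT tokens and the segmenter's whole words, mark every non-initial
# character of a matched word with "##" so it is masked as one unit.
def add_sub_symbol_sketch(bert_tokens, chinese_words):
    out = list(bert_tokens)
    max_len = max((len(w) for w in chinese_words), default=1)
    start = 0
    while start < len(out):
        matched = False
        for length in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start:start + length]) in chinese_words:
                for j in range(start + 1, start + length):
                    out[j] = "##" + out[j]  # continuation piece of a whole word
                start += length
                matched = True
                break
        if not matched:
            start += 1
    return out

assert add_sub_symbol_sketch(["你", "好", "吗"], {"你好"}) == ["你", "##好", "吗"]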
| 16 | 1 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A )  # identity key by default
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
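# Editor's sketch: the 0-based array indexing the heap class above relies on,
# plus a round-trip check that parent and child indices are consistent.
def parent_idx(i):
    return (i - 1) // 2 if i > 0 else None

def left_idx(i):
    return 2 * i + 1

def right_idx(i):
    return 2 * i + 2

assert parent_idx(left_idx(3)) == 3 and parent_idx(right_idx(3)) == 3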
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim( self ):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        max_diff = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self ,device ,dtype=torch.float32 ,seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device ,dtype=dtype )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim( self ):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
        max_diff = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset ( IterableDataset ):
    def __init__( self ,tokenizer ,dataset ,n_tasks=None ,n_copies=1 ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
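    # Yields each tokenized HumanEval prompt n_copies times so that, across processes,
    # every task receives multiple candidate completions.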
def __iter__( self ):
'''simple docstring'''
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts ,padding=True ,return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria ( StoppingCriteria ):
    def __init__( self ,start_length ,eof_strings ,tokenizer ):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self ,input_ids ,scores ,**kwargs ):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
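# Truncates a generated completion at the first top-level construct (new class/def/comment/
# decorator/print/if), which marks the end of the function body being completed.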
def remove_last_block ( string ):
    '''simple docstring'''
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code ( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            # update the stopping criterion with the length of the current prompt
            gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
return code_gens
def main ():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
    os.environ['HF_ALLOW_CODE_EVAL'] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
    gen_kwargs = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
# Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval' )
    code_eval_metric = load_metric('code_eval' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['test'] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
        _ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs )
if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['test'][task]['test']
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
            json.dump(pass_at_k , fp )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
import base64
def base85_encode ( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8' ) )
def base85_decode ( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode('utf-8' )
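# Illustrative round trip (not part of the original module):
#   base85_decode(base85_encode('hello')) == 'hello'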
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
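# Rayleigh quotient of a Hermitian matrix A and a non-zero vector v:
#   R(A, v) = (v* A v) / (v* v)
# For Hermitian A the quotient is real and bounded by the extreme eigenvalues of A.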
def is_hermitian ( matrix ):
    '''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient ( a , v ):
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests ():
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
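# Fast BERT tokenizer backed by the Rust `tokenizers` library; it rebuilds the backend
# normalizer whenever lowercasing, accent stripping, or Chinese-character handling
# diverge from the state serialized in the tokenizer file.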
class BertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 16 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
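# SORTED_HANDS is ordered from weakest to strongest, so the expected outcome of a random
# pairing can be derived directly from the two indices drawn below.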
def generate_random_hand ():
    '''simple docstring'''
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands ( number_of_hands = 100 ):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush ( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight ( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight ( hand , expected , card_values ):
    '''simple docstring'''
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind ( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values ( hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple ( hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random ( hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted ():
    '''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight ():
    '''simple docstring'''
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight ():
    '''simple docstring'''
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project ():
    '''simple docstring'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
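    # Each test below shells out through torchrun so that the script under test runs once
    # per visible GPU process.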
@require_multi_gpu
    def test_multi_gpu( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
@require_multi_gpu
    def test_multi_gpu_ops( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
@require_multi_gpu
    def test_pad_across_processes( self ):
        '''simple docstring'''
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
@require_multi_gpu
    def test_distributed_data_loop( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
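# Self-test for pad_across_processes: every process contributes a tensor of a different
# length, and the padded result must preserve the original values plus zero padding.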
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
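# Gaussian probability density function:
#   f(x) = 1 / sqrt(2*pi*sigma^2) * exp(-(x - mu)^2 / (2*sigma^2))
# e.g. gaussian(0.0) ~= 0.3989 for the standard normal (mu=0, sigma=1).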
def gaussian ( x , mu = 0.0 , sigma = 1.0 ):
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
from __future__ import annotations
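# Classic backtracking: extend the current partial permutation with every unused element,
# recurse, then undo the choice. Prints all len(sequence)! permutations.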
def generate_all_permutations ( sequence ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree ( sequence , current_sequence , index , index_used , ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
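        # Causal mask over the token sequence (image embedding, timestep, hidden states, prd token):
        # strictly upper-triangular entries are set to -10000 so attention cannot look ahead.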
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' ,causal_attention_mask ,persistent=False )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name ,module ,processors ):
            if hasattr(module ,'set_processor' ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" ,child ,processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name ,module ,processors )
        return processors
    def set_attn_processor( self ,processor ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor ,dict ) and len(processor ) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name ,module ,processor ):
            if hasattr(module ,'set_processor' ):
                if not isinstance(processor ,dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,child ,processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name ,module ,processor )
    def set_default_attn_processor( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
    def post_process_latents( self ,prior_latents ):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
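# Logger adapter that drops records on non-main processes by default; passing
# `in_order=True` instead serializes logging across ranks one process at a time.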
class MultiProcessAdapter ( logging.LoggerAdapter ):
@staticmethod
    def _should_log( main_process_only ):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' ,True )
        in_order = kwargs.pop('in_order' ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def get_logger ( name , log_level = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
| 16 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def get_dpt_config ( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
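# Maps parameter names from the original timm/DPT checkpoint onto the Hugging Face DPT
# layout (encoder blocks, reassemble stage, fusion stage, and the prediction heads).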
def rename_key ( name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    return name
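# timm stores the attention projections as one fused qkv matrix; split it into the
# separate query/key/value weights and biases that the HF implementation expects.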
def read_in_q_k_v ( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img ():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Dict = get_dpt_config(_lowerCamelCase )
# load original state_dict from URL
_lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(_lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase : Dict = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
_lowerCAmelCase : Union[str, Any] = DPTForSemanticSegmentation(_lowerCamelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# Check outputs on an image
_lowerCAmelCase : Union[str, Any] = 480 if 'ade' in checkpoint_url else 384
_lowerCAmelCase : str = DPTImageProcessor(size=_lowerCamelCase )
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Tuple = image_processor(_lowerCamelCase , return_tensors='pt' )
# forward pass
_lowerCAmelCase : Union[str, Any] = model(**_lowerCamelCase ).logits if 'ade' in checkpoint_url else model(**_lowerCamelCase ).predicted_depth
# Assert logits
_lowerCAmelCase : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
_lowerCAmelCase : int = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(_lowerCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase )
)
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print('Pushing model to hub...' )
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
        titles (`str` or `List[str]`):
            The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
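
    Example (illustrative sketch; assumes a concrete reader tokenizer such as transformers'
    `DPRReaderTokenizer` built on this mixin; the checkpoint name below is an example, not
    something defined in this file):

    ```python
    tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
    encoded = tokenizer(
        questions='What is the capital of France?',
        titles=['Paris', 'France'],
        texts=['Paris is the capital of France.', 'France is a country in Europe.'],
        return_tensors='pt',
    )
    # encoded['input_ids'] has shape (n_passages, sequence_length)
    ```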
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
            raise ValueError(
                F"""There should be as many titles as texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
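        # Rank the passages by relevance logit, then extract the best answer spans from the
        # text portion of each sequence (everything after the second [SEP]) until `num_spans`
        # predictions have been collected.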
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
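        # Score every candidate span as start_logit + end_logit, sort by that score, and
        # greedily keep the highest-scoring non-overlapping spans, up to `top_spans` of them.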
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
_lowerCAmelCase = {str(digit): digit**5 for digit in range(1_0)}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCamelCase ) )
def lowerCamelCase__ ( ):
'''simple docstring'''
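    # 7 * 9**5 = 413343 is smaller than the smallest seven-digit number, so no number with
    # seven or more digits can equal the sum of the fifth powers of its digits; searching
    # up to 1_000_000 is therefore sufficient.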
return sum(
number
for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
_lowerCAmelCase = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
_lowerCAmelCase = {
"""jukebox""": 5_1_2,
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_LYRIC_TOKENS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A ,_A ,_A=["v3", "v2", "v2"] ,_A=512 ,_A=5 ,_A="<|endoftext|>" ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else unk_token
super().__init__(
unk_token=_A ,n_genres=_A ,version=_A ,max_n_lyric_tokens=_A ,**_A ,)
_lowerCAmelCase : Dict = version
_lowerCAmelCase : List[str] = max_n_lyric_tokens
_lowerCAmelCase : Tuple = n_genres
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Dict = json.load(_A )
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : int = json.load(_A )
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : List[Any] = json.load(_A )
_lowerCAmelCase : int = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # v2 used an 80-character vocabulary; v3 dropped '+', leaving 79, so a 79-entry lyrics encoder means '+' must be added to the out-of-vocabulary pattern below.
if len(self.lyrics_encoder ) == 79:
_lowerCAmelCase : int = oov.replace(r'\-\'' ,r'\-+\'' )
_lowerCAmelCase : Any = regex.compile(_A )
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.artists_encoder.items()}
_lowerCAmelCase : Tuple = {v: k for k, v in self.genres_encoder.items()}
_lowerCAmelCase : Dict = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [self.artists_encoder.get(_A ,0 ) for artist in list_artists]
for genres in range(len(_A ) ):
_lowerCAmelCase : List[Any] = [self.genres_encoder.get(_A ,0 ) for genre in list_genres[genres]]
_lowerCAmelCase : Tuple = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_lowerCAmelCase : Union[str, Any] = [[self.lyrics_encoder.get(_A ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return list(_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = self.prepare_for_tokenization(_A ,_A ,_A )
_lowerCAmelCase : Dict = self._tokenize(_A )
return artist, genre, lyrics
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_lowerCAmelCase : int = artists[idx].lower()
_lowerCAmelCase : Optional[int] = [genres[idx].lower()]
else:
_lowerCAmelCase : int = self._normalize(artists[idx] ) + '.v2'
_lowerCAmelCase : Optional[int] = [
self._normalize(_A ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_lowerCAmelCase : List[Any] = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_lowerCAmelCase : Union[str, Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_lowerCAmelCase : int = {vocab[index]: index + 1 for index in range(len(_A ) )}
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Tuple = len(_A ) + 1
_lowerCAmelCase : Optional[Any] = self.vocab
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
_lowerCAmelCase : Optional[Any] = ''
else:
_lowerCAmelCase : int = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_lowerCAmelCase : str = self._run_strip_accents(_A )
_lowerCAmelCase : List[Any] = lyrics.replace('\\' ,'\n' )
_lowerCAmelCase : Optional[Any] = self.out_of_vocab.sub('' ,_A ), [], []
return artists, genres, lyrics
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = unicodedata.normalize('NFD' ,_A )
_lowerCAmelCase : Tuple = []
for char in text:
_lowerCAmelCase : Dict = unicodedata.category(_A )
if cat == "Mn":
continue
output.append(_A )
return "".join(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = (
[chr(_A ) for i in range(ord('a' ) ,ord('z' ) + 1 )]
+ [chr(_A ) for i in range(ord('A' ) ,ord('Z' ) + 1 )]
+ [chr(_A ) for i in range(ord('0' ) ,ord('9' ) + 1 )]
+ ['.']
)
_lowerCAmelCase : Optional[int] = frozenset(_A )
_lowerCAmelCase : List[Any] = re.compile(r'_+' )
_lowerCAmelCase : int = ''.join([c if c in accepted else '_' for c in text.lower()] )
_lowerCAmelCase : Optional[Any] = pattern.sub('_' ,_A ).strip('_' )
return text
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return " ".join(_A )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if not isinstance(_A ,_A ):
_lowerCAmelCase : List[str] = TensorType(_A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_lowerCAmelCase : List[str] = tf.constant
_lowerCAmelCase : Any = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_lowerCAmelCase : Dict = torch.tensor
_lowerCAmelCase : List[str] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_lowerCAmelCase : str = jnp.array
_lowerCAmelCase : List[str] = _is_jax
else:
_lowerCAmelCase : List[str] = np.asarray
_lowerCAmelCase : int = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_lowerCAmelCase : Optional[Any] = [inputs]
if not is_tensor(_A ):
_lowerCAmelCase : Any = as_tensor(_A )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self ,_A ,_A ,_A="" ,_A="pt" ):
'''simple docstring'''
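        # Tokenize the artist, genre and lyrics once per model version (one per Jukebox
        # prior), convert them to ids, and pack the metadata + lyric token ids together
        # with constant -inf attention masks into a BatchEncoding.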
_lowerCAmelCase : str = [0, 0, 0]
_lowerCAmelCase : List[Any] = [artist] * len(self.version )
_lowerCAmelCase : int = [genres] * len(self.version )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self.tokenize(_A ,_A ,_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = self._convert_token_to_id(_A ,_A ,_A )
_lowerCAmelCase : int = [-INFINITY] * len(full_tokens[-1] )
_lowerCAmelCase : Union[str, Any] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=_A )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : int = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=_A ) )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=_A ) )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=_A ) )
return (artists_file, genres_file, lyrics_file)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.artists_decoder.get(_A )
_lowerCAmelCase : Any = [self.genres_decoder.get(_A ) for genre in genres_index]
_lowerCAmelCase : Any = [self.lyrics_decoder.get(_A ) for character in lyric_index]
return artist, genres, lyrics
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
                # set the timesteps on the scheduler loaded from disk
new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
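        # Run a short ten-step denoising loop over the dummy model with a scheduler built
        # from the test config and return the final sample.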
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( ):
'''simple docstring'''
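    # Project Euler 9: find the Pythagorean triplet with a + b + c = 1000, i.e.
    # c = 1000 - a - b and a**2 + b**2 == c**2, and return the product a * b * c.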
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
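    # Map a single parameter name from the original OpenAI Jukebox checkpoint layout to
    # the naming scheme expected by the Hugging Face JukeboxModel implementation.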
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
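    # Each regex below matches one family of checkpoint keys (encoder/decoder conv and
    # resnet blocks, projection layers, conditioner blocks) so that the numbered
    # sub-module indices can be recomputed for the converted state dict.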
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}: no matching key in the model state dict""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key}: shapes {val.shape} and {value.shape} do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
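    # Classical kinetic energy: KE = (1/2) * m * v**2. Taking abs(velocity) twice is
    # equivalent to squaring it, so the result is non-negative for either sign of velocity.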
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
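    # Parse the JSON blob Instagram embeds in a <script> tag on the profile page and
    # pull the user object out of the shared-data payload.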
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_activation('swish' )
self.assertIsInstance(_A ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = get_activation('silu' )
self.assertIsInstance(_A ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_activation('mish' )
self.assertIsInstance(_A ,nn.Mish )
self.assertEqual(act(torch.tensor(-200 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_activation('gelu' )
self.assertIsInstance(_A ,nn.GELU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
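        # Tokenize with SentencePiece, then split a trailing comma off pieces that end in
        # ``<digit>,`` so numbers followed by commas are segmented the way the original
        # XLNet tokenizer expects.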
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
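# Illustrative sketch (added; not part of the original tokenizer code): the
# pair-building method above appends separators and puts the classifier token
# at the END of the sequence, which is the XLNet convention (BERT puts it
# first). The token id values below are hypothetical placeholders.
def _xlnet_layout_sketch(ids_a, ids_b=None, sep_id=4, cls_id=3):
    # single sequence: A <sep> <cls>
    if ids_b is None:
        return ids_a + [sep_id] + [cls_id]
    # pair of sequences: A <sep> B <sep> <cls>
    return ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]

assert _xlnet_layout_sketch([10, 11], [20]) == [10, 11, 4, 20, 4, 3]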
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
_lowerCAmelCase = int(input("""Enter number: """).strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
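# Self-contained restatement (added) of the divisor-sum test implemented above,
# since the obfuscated definition cannot be called directly: a number is
# perfect when its proper divisors sum back to the number itself.
def _is_perfect_sketch(n: int) -> bool:
    return sum(i for i in range(1, n // 2 + 1) if n % i == 0) == n

assert _is_perfect_sketch(6) and _is_perfect_sketch(28)
assert not _is_perfect_sketch(27)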
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# append 48 zeroed words to complete the 64-word message schedule
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zeroed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
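# Illustrative check (added; not in the original module) of the preprocessing
# rule implemented above: the padded message length must be a multiple of
# 64 bytes, with the original bit-length stored big-endian in the final 8 bytes.
def _pad_sketch(data: bytes) -> bytes:
    padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
    return data + padding + struct.pack(">Q", len(data) * 8)

_padded = _pad_sketch(b"abc")
assert len(_padded) % 64 == 0
assert struct.unpack(">Q", _padded[-8:])[0] == 24  # 3 bytes * 8 bits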
| 16 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
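# Stand-alone sketch (added) of the spherical interpolation in the static
# method just above: unlike plain linear interpolation, the midpoint of two
# orthogonal unit vectors stays on the unit sphere.
_xa = torch.tensor([1.0, 0.0])
_xb = torch.tensor([0.0, 1.0])
_theta = acos(torch.dot(_xa, _xb) / (torch.norm(_xa) * torch.norm(_xb)))
_mid = sin(0.5 * _theta) * _xa / sin(_theta) + sin(0.5 * _theta) * _xb / sin(_theta)
assert torch.allclose(torch.norm(_mid), torch.tensor(1.0))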
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores the function used to score an item; heap ordering is based on this
# score. Defaults to the identity function.
_lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure the heap is valid in both the up and down directions. Ideally only
# one of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
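# Self-contained sketch (added) of the position-map bookkeeping the heap above
# relies on: whenever two slots are swapped, the item -> index map is updated
# first, so that update/delete by item stays O(log n).
_arr = [["a", 1], ["b", 2], ["c", 3]]
_pos_map = {"a": 0, "b": 1, "c": 2}

def _swap_sketch(i, j):
    _pos_map[_arr[i][0]], _pos_map[_arr[j][0]] = j, i
    _arr[i], _arr[j] = _arr[j], _arr[i]

_swap_sketch(0, 2)
assert _arr[0][0] == "c" and _pos_map["a"] == 2 and _pos_map["c"] == 0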
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
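# Cross-check (added): a 4-element sequence has 4! = 24 orderings, which is
# exactly how many lines the backtracking above prints for `sequence`.
from itertools import permutations
assert len(list(permutations([3, 1, 2, 4]))) == 24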
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
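# Minimal sketch (added) of the causal mask built in __init__ above: filling a
# square matrix with -10000 and keeping only the strict upper triangle blocks
# attention to future positions once the mask is added to attention scores.
_n = 4
_causal = torch.full([_n, _n], -10000.0)
_causal.triu_(1)
assert _causal[0, 1] == -10000.0 and _causal[1, 0] == 0.0 and _causal[2, 2] == 0.0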
| 16 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_lowerCAmelCase = pd.read_csv("""sample_data.csv""", header=None)
_lowerCAmelCase = df.shape[:1][0]
# If you're using some other dataset input the target column
_lowerCAmelCase = df.iloc[:, 1:2]
_lowerCAmelCase = actual_data.values.reshape(len_data, 1)
_lowerCAmelCase = MinMaxScaler().fit_transform(actual_data)
_lowerCAmelCase = 1_0
_lowerCAmelCase = 5
_lowerCAmelCase = 2_0
_lowerCAmelCase = len_data - periods * look_back
_lowerCAmelCase = actual_data[:division]
_lowerCAmelCase = actual_data[division - look_back :]
_lowerCAmelCase , _lowerCAmelCase = [], []
_lowerCAmelCase , _lowerCAmelCase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_lowerCAmelCase = np.array(train_x)
_lowerCAmelCase = np.array(test_x)
_lowerCAmelCase = np.array([list(i.ravel()) for i in train_y])
_lowerCAmelCase = np.array([list(i.ravel()) for i in test_y])
_lowerCAmelCase = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
_lowerCAmelCase = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
_lowerCAmelCase = model.predict(x_test)
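# Self-contained sketch (added) of the sliding-window split performed above:
# each sample is `look_back` consecutive values and its target is the next
# `forward_days` values.
_series = np.arange(10).reshape(-1, 1)
_lb, _fd = 3, 2
_x = [_series[i : i + _lb] for i in range(0, len(_series) - _fd - _lb + 1)]
_y = [_series[i + _lb : i + _lb + _fd] for i in range(len(_x))]
assert _x[0].ravel().tolist() == [0, 1, 2] and _y[0].ravel().tolist() == [3, 4]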
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
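# Stand-alone sketch (added; assumes a working jax install) of the
# device-keying trick used above: jaxlib Device objects are not picklable, so
# the formatter maps their str() identifiers back to the live objects.
import jax
_device_mapping = {str(_device): _device for _device in jax.devices()}
assert _device_mapping[str(jax.devices()[0])] is jax.devices()[0]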
| 16 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowerCAmelCase = ["""text""", """image""", """audio"""]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
inputs.append(create_inputs(_lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = []
for output in outputs:
if isinstance(_lowerCamelCase , (str, AgentText) ):
output_types.append('text' )
elif isinstance(_lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __UpperCamelCase :
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,'inputs' ) )
self.assertTrue(hasattr(self.tool ,'outputs' ) )
_lowerCAmelCase : str = self.tool.inputs
for _input in inputs:
if isinstance(_input ,_A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
_lowerCAmelCase : Tuple = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = create_inputs(self.tool.inputs )
_lowerCAmelCase : str = self.tool(*_A )
# There is a single output
if len(self.tool.outputs ) == 1:
_lowerCAmelCase : Union[str, Any] = [outputs]
self.assertListEqual(output_types(_A ) ,self.tool.outputs )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,'description' ) )
self.assertTrue(hasattr(self.tool ,'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = create_inputs(self.tool.inputs )
_lowerCAmelCase : List[Any] = self.tool(*_A )
if not isinstance(_A ,_A ):
_lowerCAmelCase : List[Any] = [outputs]
self.assertEqual(len(_A ) ,len(self.tool.outputs ) )
for output, output_type in zip(_A ,self.tool.outputs ):
_lowerCAmelCase : Dict = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_A ,_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = create_inputs(self.tool.inputs )
_lowerCAmelCase : int = []
for _input, input_type in zip(_A ,self.tool.inputs ):
if isinstance(_A ,_A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
_lowerCAmelCase : Optional[int] = self.tool(*_A )
if not isinstance(_A ,_A ):
_lowerCAmelCase : Dict = [outputs]
self.assertEqual(len(_A ) ,len(self.tool.outputs ) )
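# Self-contained sketch (added) of the type-tagging rule the second
# module-level helper above applies: results are classified by isinstance
# checks, tried in order. The branches below are simplified placeholders.
def _tag_sketch(output):
    if isinstance(output, str):
        return "text"
    raise ValueError(f"Invalid output: {output}")

assert [_tag_sketch(o) for o in ["hello world"]] == ["text"]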
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_lowerCAmelCase : Optional[int] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith('encoder' ):
_lowerCAmelCase : Dict = k.replace('.attn' , '.self_attn' )
_lowerCAmelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_lowerCAmelCase : Optional[int] = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_lowerCAmelCase : Union[str, Any] = k.replace('norm1' , 'self_attn_layer_norm' )
_lowerCAmelCase : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
_lowerCAmelCase : int = k.replace('norm3' , 'final_layer_norm' )
return k
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_lowerCAmelCase : str = sd.pop(_lowerCamelCase )
_lowerCAmelCase : List[Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_lowerCAmelCase : Any = v
_lowerCAmelCase = ["""START"""]
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase , map_location='cpu' )
_lowerCAmelCase : str = model['model']
_lowerCAmelCase : Tuple = BlenderbotConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : str = BlenderbotForConditionalGeneration(_lowerCamelCase )
_lowerCAmelCase : Dict = m.model.state_dict().keys()
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_lowerCAmelCase : Dict = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_lowerCAmelCase : Optional[int] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
_lowerCAmelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
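# Illustrative sketch (added; pattern list trimmed) of the substitution pass
# the converter applies: each (parlai_name, hf_name) pair is replaced in
# order, then encoder/decoder attention modules get their positional names.
_key = "encoder.layers.0.attention.q_lin.weight"
for _parlai, _hf in [("attention", "attn"), ("q_lin", "q_proj")]:
    _key = _key.replace(_parlai, _hf)
_key = _key.replace(".attn", ".self_attn")  # encoder branch
assert _key == "encoder.layers.0.self_attn.q_proj.weight"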
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges file provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding. '
'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
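# Self-contained sketch (added) of the bigram extraction the module-level
# helper above performs: adjacent symbol pairs are what the BPE merge loop
# ranks and merges.
_word = ("l", "o", "w")
_pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
assert _pairs == {("l", "o"), ("o", "w")}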
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""OwlViTFeatureExtractor"""]
_lowerCAmelCase = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Tuple = self.act(self.wi_0(_A ) )
        _lowerCAmelCase : Optional[int] = self.wi_1(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Optional[int] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 ,keepdim=True )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
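    # A note on the normalization above: this is T5-style RMSNorm, which, unlike
    # standard LayerNorm, rescales by the root mean square only and never
    # subtracts the mean:
    #     y = weight * x / sqrt(mean(x**2, axis=-1) + eps)
    # For example, x = [3.0, 4.0] with eps = 0 gives rms = sqrt(12.5) ~ 3.5355,
    # so x / rms ~ [0.8485, 1.1314] before the learned weight is applied.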
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
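    # The expression above is the GPT-2 "new" tanh approximation of GELU:
    #     gelu(x) ~ 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
    # e.g. gelu(1.0) ~ 0.8412, close to the exact x * Phi(x) = 0.8413.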
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
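    # FiLM (feature-wise linear modulation) in one line: the conditioning embedding
    # is projected to 2 * out_features, chunked into (scale, shift), and applied as
    #     y = x * (1 + scale) + shift
    # so a zero projection output would leave x unchanged.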
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list, low, mid, high):
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
return input_list
def iter_merge_sort(input_list):
    '''simple docstring'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
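# A quick sanity check (illustrative values, not part of the original script):
#     >>> iter_merge_sort([4, 1, 3, 2])
#     [1, 2, 3, 4]
# The pass width p doubles every round (2, 4, 8, ...), so there are O(log n)
# passes of O(n) merging work each, giving the usual O(n log n) bound.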
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=False ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Tuple = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[Any] = use_input_mask
_lowerCAmelCase : Tuple = use_token_type_ids
_lowerCAmelCase : Dict = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Dict = num_choices
_lowerCAmelCase : List[str] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : int = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = DistilBertModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[int] = model(_A ,_A )
_lowerCAmelCase : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = DistilBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A ,attention_mask=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = DistilBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(
_A ,attention_mask=_A ,start_positions=_A ,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Any = DistilBertForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.num_labels
_lowerCAmelCase : Any = DistilBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A ,attention_mask=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.num_choices
_lowerCAmelCase : int = DistilBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : List[Any] = model(
_A ,attention_mask=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase)) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCAmelCase = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = DistilBertModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self ,config_class=_A ,dim=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = DistilBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Tuple = model_class(config=_A )
_lowerCAmelCase : List[Any] = self._prepare_for_class(_A ,_A )
_lowerCAmelCase : Dict = torch.jit.trace(
_A ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A ,os.path.join(_A ,'traced_model.pt' ) )
_lowerCAmelCase : int = torch.jit.load(os.path.join(_A ,'traced_model.pt' ) ,map_location=_A )
loaded(inputs_dict['input_ids'].to(_A ) ,inputs_dict['attention_mask'].to(_A ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = DistilBertModel.from_pretrained('distilbert-base-uncased' )
_lowerCAmelCase : Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(_A ,attention_mask=_A )[0]
_lowerCAmelCase : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,_A )
_lowerCAmelCase : int = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,_A ,atol=1E-4 ) )
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix, vector):
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
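# A minimal check of solve() on a 2x2 system (illustrative values):
#     >>> solve([[1, 2], [3, 4]], [[5], [6]])
#     [[-4.0], [4.5]]
# i.e. it returns x such that A @ x = b, computed by Gaussian elimination with
# partial pivoting followed by back substitution.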
def interpolate(y_list):
    '''simple docstring'''
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
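# Example (assumed inputs): interpolate([1, 8, 27]) fits the unique degree-2
# polynomial through (1, 1), (2, 8), (3, 27). The returned function matches
# n**3 only at those points; at n = 4 it yields 58 rather than 64, which is
# exactly the "first incorrect term" idea the problem is built around.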
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
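# This alternating sum is the Project Euler 101 generating function; it
# telescopes to (n**11 + 1) / (n + 1), so u(1) = 1 and u(2) = 683.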
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
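# For each prefix of the data points, solution() finds the first value of n where
# the fitted polynomial OP(k, n) disagrees with u(n) (the "first incorrect term",
# FIT) and sums those values. Each fit has degree at most order - 1 = 9, one less
# than u's degree 10, so every fit is guaranteed to diverge eventually and the
# while loop always terminates.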
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowerCAmelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __UpperCamelCase :
def __init__( self ,_A ,_A=16 ,_A=13 ,_A=7 ,_A=14 ,_A=10 ,_A=19 ,_A=5 ,_A=4 ,_A=True ,_A=16 ,_A=2 ,_A=4 ,_A=4 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=[1, 2, 3, 4, 5] ,_A=25 ,_A=5 ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Union[str, Any] = prediction_length
_lowerCAmelCase : Union[str, Any] = context_length
_lowerCAmelCase : List[Any] = cardinality
_lowerCAmelCase : int = num_time_features
_lowerCAmelCase : Tuple = lags_sequence
_lowerCAmelCase : Union[str, Any] = embedding_dimension
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : str = context_length
_lowerCAmelCase : Any = prediction_length + label_length
_lowerCAmelCase : Any = label_length
_lowerCAmelCase : Tuple = moving_average
_lowerCAmelCase : Tuple = autocorrelation_factor
def __lowerCamelCase ( self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = config.context_length + max(config.lags_sequence )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, 1] ,config.cardinality[0] )
_lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length] )
_lowerCAmelCase : str = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCAmelCase : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCAmelCase : Optional[Any] = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_config()
_lowerCAmelCase : Dict = self.prepare_autoformer_inputs_dict(_A )
return config, inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AutoformerModel(config=_A ).to(_A ).eval()
_lowerCAmelCase : Tuple = model(**_A )
_lowerCAmelCase : Any = outputs.encoder_last_hidden_state
_lowerCAmelCase : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(_A )
_lowerCAmelCase : List[Any] = AutoformerEncoder.from_pretrained(_A ).to(_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = model.create_network_inputs(**_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
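        # The decomposition layer splits the context window into a seasonal part
        # and a smooth trend part (a moving average in the Autoformer design); the
        # two streams are concatenated with the extra features below to form the
        # encoder and decoder inputs.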
_lowerCAmelCase : Tuple = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,)
_lowerCAmelCase : List[str] = encoder(inputs_embeds=_A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_lowerCAmelCase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] ,dim=1 )
.unsqueeze(1 )
.repeat(1 ,config.prediction_length ,1 )
)
_lowerCAmelCase : str = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,)
_lowerCAmelCase : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
_lowerCAmelCase : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Dict = model.get_decoder()
decoder.save_pretrained(_A )
_lowerCAmelCase : Dict = AutoformerDecoder.from_pretrained(_A ).to(_A )
_lowerCAmelCase : Tuple = decoder(
trend=_A ,inputs_embeds=_A ,encoder_hidden_states=_A ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCAmelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoformerModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = model_class.from_pretrained(_A ,output_loading_info=_A )
self.assertEqual(info['missing_keys'] ,[] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_A )
@unittest.skip(reason='Model has no tokens embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
        _lowerCAmelCase : str = inspect.signature(getattr(AutoformerModel ,'forward' ) )
# The main input is the name of the argument after `self`
_lowerCAmelCase : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_A )
_lowerCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(_A )] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = True
_lowerCAmelCase : str = getattr(self.model_tester ,'seq_length' ,_A )
_lowerCAmelCase : List[str] = getattr(self.model_tester ,'decoder_seq_length' ,_A )
_lowerCAmelCase : int = getattr(self.model_tester ,'encoder_seq_length' ,_A )
_lowerCAmelCase : List[Any] = getattr(self.model_tester ,'d_model' ,_A )
_lowerCAmelCase : str = getattr(self.model_tester ,'num_attention_heads' ,_A )
_lowerCAmelCase : Optional[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = True
_lowerCAmelCase : Any = False
_lowerCAmelCase : int = True
_lowerCAmelCase : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : int = True
_lowerCAmelCase : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : List[Any] = outputs.encoder_attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Union[str, Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_A ,_A )
# decoder attentions
_lowerCAmelCase : Dict = outputs.decoder_attentions
self.assertIsInstance(_A ,(list, tuple) )
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# cross attentions
_lowerCAmelCase : Any = outputs.cross_attentions
self.assertIsInstance(_A ,(list, tuple) )
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# Check attention is always last and order is fine
_lowerCAmelCase : str = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Dict = model(**self._prepare_for_class(_A ,_A ) )
self.assertEqual(out_len + 2 ,len(_A ) )
_lowerCAmelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
@is_flaky()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( _lowerCamelCase="train-batch.pt" ):
'''simple docstring'''
_lowerCAmelCase : int = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=_lowerCamelCase , repo_type='dataset' )
_lowerCAmelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
_lowerCAmelCase : Dict = prepare_batch()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,future_values=batch['future_values'] ,future_time_features=batch['future_time_features'] ,)[0]
_lowerCAmelCase : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape ,_A )
_lowerCAmelCase : Dict = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] ,device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] ,_A ,atol=_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
_lowerCAmelCase : Any = prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,).encoder_last_hidden_state
_lowerCAmelCase : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape ,_A )
_lowerCAmelCase : Tuple = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] ,device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] ,_A ,atol=_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_A )
_lowerCAmelCase : int = prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowerCAmelCase : Any = model.generate(
static_categorical_features=batch['static_categorical_features'] ,past_time_features=batch['past_time_features'] ,past_values=batch['past_values'] ,future_time_features=batch['future_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,)
_lowerCAmelCase : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape ,_A )
_lowerCAmelCase : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] ,device=_A )
_lowerCAmelCase : Optional[int] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,_A ,rtol=1E-1 ) )
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    '''simple docstring'''
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
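# Quick illustration (assumed examples, not from the original script):
#     _is_chinese_char(ord("中"))  # True: U+4E2D, CJK Unified Ideographs
#     _is_chinese_char(ord("a"))   # False: ASCII is outside every CJK block
# The check is purely block-based, so kana, hangul and punctuation fall outside it.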
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
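# Illustration of the whole-word marking above (assumed example): with
# bert_tokens = ["北", "京", "人"] and chinese_word_set = {"北京"}, the longest
# dictionary match wins and the output is ["北", "##京", "人"]: every character
# after the first of a matched word gets the "##" continuation prefix.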
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
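# The returned ref_ids holds, for each input line, the positions of "##"-marked
# Chinese subword tokens; whole-word-masking training code can use these to mask
# an entire Chinese word whenever any one of its pieces is selected.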
def main(args):
    '''simple docstring'''
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
_lowerCAmelCase = {"""facebook/blenderbot_small-90M""": 5_1_2}
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
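# Example (assumed input): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the candidate symbol bigrams
# that the BPE loop in the tokenizer below merges, lowest merge rank first.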
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A ,_A="__start__" ,_A="__end__" ,_A="__unk__" ,_A="__null__" ,**_A ,):
'''simple docstring'''
super().__init__(unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,**_A )
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Union[str, Any] = json.load(_A )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[int] = merges_handle.read().split('\n' )[1:-1]
_lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase : Union[str, Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Dict = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Union[str, Any] = re.sub('([.,!?()])' ,r' \1' ,_A )
_lowerCAmelCase : Any = re.sub('(\')' ,r' \1 ' ,_A )
_lowerCAmelCase : Tuple = re.sub(r'\s{2,}' ,' ' ,_A )
if "\n" in token:
_lowerCAmelCase : Optional[Any] = token.replace('\n' ,' __newln__' )
_lowerCAmelCase : Optional[Any] = token.split(' ' )
_lowerCAmelCase : List[str] = []
for token in tokens:
if not len(_A ):
continue
_lowerCAmelCase : str = token.lower()
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
words.append(_A )
continue
while True:
_lowerCAmelCase : str = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Tuple = bigram
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = 0
while i < len(_A ):
try:
_lowerCAmelCase : str = word.index(_A ,_A )
new_word.extend(word[i:j] )
_lowerCAmelCase : Union[str, Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[int] = tuple(_A )
_lowerCAmelCase : str = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[Any] = get_pairs(_A )
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
return " ".join(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = re.findall(r'\S+\n?' ,_A )
for token in words:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = token.lower()
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.decoder.get(_A ,self.unk_token )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A ).replace('@@ ' ,'' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Dict = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : Tuple = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : Tuple = 0
with open(_A ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return vocab_file, merge_file
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase :
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = question_encoder
_lowerCAmelCase : List[str] = generator
_lowerCAmelCase : str = self.question_encoder
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if os.path.isfile(_A ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_A ,exist_ok=_A )
_lowerCAmelCase : Union[str, Any] = os.path.join(_A ,'question_encoder_tokenizer' )
_lowerCAmelCase : Union[str, Any] = os.path.join(_A ,'generator_tokenizer' )
self.question_encoder.save_pretrained(_A )
self.generator.save_pretrained(_A )
@classmethod
def __lowerCamelCase ( cls ,_A ,**_A ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Any = kwargs.pop('config' ,_A )
if config is None:
_lowerCAmelCase : Dict = RagConfig.from_pretrained(_A )
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
_A ,config=config.question_encoder ,subfolder='question_encoder_tokenizer' )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(
_A ,config=config.generator ,subfolder='generator_tokenizer' )
return cls(question_encoder=_A ,generator=_A )
def __call__( self ,*_A ,**_A ):
'''simple docstring'''
return self.current_tokenizer(*_A ,**_A )
def __lowerCamelCase ( self ,*_A ,**_A ):
'''simple docstring'''
return self.generator.batch_decode(*_A ,**_A )
def __lowerCamelCase ( self ,*_A ,**_A ):
'''simple docstring'''
return self.generator.decode(*_A ,**_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.question_encoder
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.generator
def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,_A = None ,_A = "longest" ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' ,_A ,)
if max_length is None:
_lowerCAmelCase : Union[str, Any] = self.current_tokenizer.model_max_length
_lowerCAmelCase : Optional[int] = self(
_A ,add_special_tokens=_A ,return_tensors=_A ,max_length=_A ,padding=_A ,truncation=_A ,**_A ,)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : Optional[Any] = self.current_tokenizer.model_max_length
_lowerCAmelCase : str = self(
text_target=_A ,add_special_tokens=_A ,return_tensors=_A ,padding=_A ,max_length=_A ,truncation=_A ,**_A ,)
_lowerCAmelCase : Dict = labels['input_ids']
return model_inputs
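# Illustrative replacement for the deprecated `prepare_seq2seq_batch` above (a sketch only; `tokenizer`,
# `src_texts` and `tgt_texts` are assumed names for a RagTokenizer instance and two lists of strings):
#
#   model_inputs = tokenizer(src_texts, padding='longest', truncation=True, return_tensors='pt')
#   labels = tokenizer(text_target=tgt_texts, padding='longest', truncation=True, return_tensors='pt')
#   model_inputs['labels'] = labels['input_ids']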
| 16 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
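# Illustrative round-trip, assuming the two helpers above are bound to distinct names
# (e.g. `base64_encode` / `base64_decode`) instead of the shared obfuscated name:
#
#   >>> base64_decode(base64_encode('Hello World!'))
#   'Hello World!'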
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from collections.abc import Callable
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : float = a
_lowerCAmelCase : float = b
if function(_lowerCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_lowerCamelCase ) == 0:
return b
elif (
function(_lowerCamelCase ) * function(_lowerCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_lowerCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_lowerCamelCase ) == 0:
return mid
elif function(_lowerCamelCase ) * function(_lowerCamelCase ) < 0:
_lowerCAmelCase : str = mid
else:
_lowerCAmelCase : List[Any] = mid
_lowerCAmelCase : Optional[Any] = start + (end - start) / 2.0
return mid
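# Worked example (illustrative): for f(x) = x**3 - 2*x - 5 on [1, 1000], f(1) = -6 < 0 and f(1000) > 0,
# so the loop above repeatedly halves the bracketing interval until the midpoint lies within 1e-7 of the
# single real root at x ≈ 2.0945515.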
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Optional[int] = char
_lowerCAmelCase : Any = set(_lowerCamelCase )
return pairs
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,_A ,_A ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,**_A ,):
'''simple docstring'''
super().__init__(
bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,**_A ,)
_lowerCAmelCase : List[Any] = vocab_file
_lowerCAmelCase : Dict = merges_file
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Any = 3
self.add_from_file(_A )
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
_lowerCAmelCase : Union[str, Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
_lowerCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = tuple(_A )
_lowerCAmelCase : int = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
_lowerCAmelCase : Dict = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : str = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : int = bigram
_lowerCAmelCase : int = []
_lowerCAmelCase : List[Any] = 0
while i < len(_A ):
try:
_lowerCAmelCase : int = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[int] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : int = tuple(_A )
_lowerCAmelCase : Optional[int] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : str = '@@ '.join(_A )
_lowerCAmelCase : Union[str, Any] = word[:-4]
_lowerCAmelCase : Dict = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[str] = re.findall(r'\S+\n?' ,_A )
for token in words:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.decoder.get(_A ,self.unk_token )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ' '.join(_A ).replace('@@ ' ,'' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Any = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file ,_A )
if os.path.abspath(self.merges_file ) != os.path.abspath(_A ):
copyfile(self.merges_file ,_A )
return out_vocab_file, out_merge_file
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if isinstance(_A ,_A ):
try:
with open(_A ,'r' ,encoding='utf-8' ) as fd:
self.add_from_file(_A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
_lowerCAmelCase : List[str] = f.readlines()
for lineTmp in lines:
_lowerCAmelCase : List[str] = lineTmp.strip()
_lowerCAmelCase : Optional[Any] = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
_lowerCAmelCase : Optional[Any] = line[:idx]
_lowerCAmelCase : Optional[Any] = len(self.encoder )
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
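# Worked example (illustrative, assuming 2 processes): process 0 builds a (2, 10) tensor and process 1 a
# (3, 10) tensor, so `pad_across_processes` zero-pads both to (3, 10); the checks above assert that the
# padding lands at the end by default and at the front when `pad_first=True`.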
| 16 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ort.SessionOptions()
_lowerCAmelCase : List[Any] = False
return options
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_lowerCAmelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_lowerCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
# using the PNDM scheduler by default
_lowerCAmelCase : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=_A ,feature_extractor=_A ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = 'A red cat sitting on a park bench'
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(0 )
_lowerCAmelCase : Any = pipe(
prompt=_A ,image=_A ,mask_image=_A ,strength=0.7_5 ,guidance_scale=7.5 ,num_inference_steps=15 ,generator=_A ,output_type='np' ,)
_lowerCAmelCase : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 16 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'hf-internal-testing/tiny-random-t5'
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_A )
_lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_A )
_lowerCAmelCase : Optional[Any] = tokenizer('This is me' ,return_tensors='pt' )
_lowerCAmelCase : int = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_lowerCAmelCase : Union[str, Any] = model.generate(**_A )
_lowerCAmelCase : Optional[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
_lowerCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_A )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_lowerCAmelCase : Optional[int] = model_reloaded.generate(**_A )
self.assertTrue(torch.allclose(_A ,_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'hf-internal-testing/tiny-random-t5'
_lowerCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A )
_lowerCAmelCase : str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_A ):
model.save_pretrained(_A )
_lowerCAmelCase : str = model.reverse_bettertransformer()
model.save_pretrained(_A )
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
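# Illustrative usage sketch, assuming the factory defined above is exposed as `get_logger`; an
# `Accelerator()` or `PartialState()` must be created first, as the `log` override enforces:
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level='INFO')
#   logger.info('logged on the main process only')
#   logger.info('logged by every rank, in order', main_process_only=False, in_order=True)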
| 16 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PhobertTokenizer
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : str = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
_lowerCAmelCase : Union[str, Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Optional[int] = ['#version: 0.2', 'l à</w>']
_lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 'Tôi là VinAI Research'
_lowerCAmelCase : Any = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowerCAmelCase : List[Any] = 'Tôi là VinAI Research'
_lowerCAmelCase : Dict = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(_A )
print(_A )
self.assertListEqual(_A ,_A )
_lowerCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Dict = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,_A )
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
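    Example (an illustrative sketch; the call mirrors the `__call__` signature defined below):

    ```python
    >>> from transformers import DPRReaderTokenizer

    >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    >>> encoded_inputs = tokenizer(
    ...     questions=["What is love?"],
    ...     titles=["Haddaway"],
    ...     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    ...     return_tensors="pt",
    ... )
    ```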
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : _A[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = field(default=a__ , metadata={"help": "The input training data file (a text file)."} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.train_file is not None:
_lowerCAmelCase : Any = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_lowerCAmelCase : Optional[int] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = True
_UpperCAmelCase = None
_UpperCAmelCase = None
def __call__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = 'label' if 'label' in features[0].keys() else 'labels'
_lowerCAmelCase : Dict = [feature.pop(_A ) for feature in features]
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Any = len(features[0]['input_ids'] )
_lowerCAmelCase : Any = [
[{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
]
_lowerCAmelCase : Any = list(chain(*_A ) )
_lowerCAmelCase : List[str] = self.tokenizer.pad(
_A ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
_lowerCAmelCase : Union[str, Any] = {k: v.view(_A ,_A ,-1 ) for k, v in batch.items()}
# Add back labels
_lowerCAmelCase : Dict = torch.tensor(_A ,dtype=torch.intaa )
return batch
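# Shape sketch (illustrative): with a batch of 8 features and 4 answer choices, the collator above gets
# 8 dicts whose values each hold 4 sequences, flattens them to 32 sequences for padding, reshapes every
# padded tensor back to (8, 4, max_seq_len), and re-attaches the popped labels as an int64 tensor.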
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCAmelCase : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_lowerCAmelCase : Optional[Any] = {}
if data_args.train_file is not None:
_lowerCAmelCase : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
_lowerCAmelCase : Any = data_args.validation_file
_lowerCAmelCase : List[Any] = data_args.train_file.split('.' )[-1]
_lowerCAmelCase : str = load_dataset(
_lowerCamelCase , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_lowerCAmelCase : List[str] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_lowerCAmelCase : Dict = [f"""ending{i}""" for i in range(4 )]
_lowerCAmelCase : Tuple = 'sent1'
_lowerCAmelCase : Any = 'sent2'
if data_args.max_seq_length is None:
_lowerCAmelCase : Optional[int] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length`'
                ' value of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length`'
                ' you can override this default with `--max_seq_length xxx`.' )
_lowerCAmelCase : Any = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_lowerCAmelCase : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
_lowerCAmelCase : str = examples[question_header_name]
_lowerCAmelCase : Dict = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowerCamelCase )
]
# Flatten out
_lowerCAmelCase : Tuple = list(chain(*_lowerCamelCase ) )
_lowerCAmelCase : Union[str, Any] = list(chain(*_lowerCamelCase ) )
# Tokenize
_lowerCAmelCase : List[str] = tokenizer(
_lowerCamelCase , _lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowerCAmelCase : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowerCAmelCase : Any = min(len(_lowerCamelCase ) , data_args.max_train_samples )
_lowerCAmelCase : Tuple = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_lowerCAmelCase : Any = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowerCAmelCase : Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowerCAmelCase : str = min(len(_lowerCamelCase ) , data_args.max_eval_samples )
_lowerCAmelCase : str = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_lowerCAmelCase : int = eval_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
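    # Use the default collator when inputs were already padded to max length; otherwise pad dynamically
    # per batch, rounding up to a multiple of 8 when fp16 is enabled.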
_lowerCAmelCase : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase, _lowerCAmelCase : Dict = eval_predictions
_lowerCAmelCase : Optional[Any] = np.argmax(_lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_lowerCAmelCase : Optional[int] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : Dict = last_checkpoint
_lowerCAmelCase : int = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_lowerCAmelCase : int = train_result.metrics
_lowerCAmelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : Union[str, Any] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('train' , _lowerCamelCase )
trainer.save_metrics('train' , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase : Optional[Any] = trainer.evaluate()
_lowerCAmelCase : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
_lowerCAmelCase : Dict = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('eval' , _lowerCamelCase )
trainer.save_metrics('eval' , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
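    # For xla_spawn (TPUs): the argument is the process index passed in by the spawner.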
main()
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
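        # Build a tiny 1D UNet and an IPNDM scheduler so the pipeline test stays fast and deterministic.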
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=2 ,_A=3 ,_A=4 ,_A=2 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=36 ,_A=3 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=6 ,_A=6 ,_A=3 ,_A=4 ,_A=None ,_A=1000 ,):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Any = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : str = text_seq_length
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : str = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = coordinate_size
_lowerCAmelCase : Optional[Any] = shape_size
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : str = num_choices
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : Dict = text_seq_length
_lowerCAmelCase : Optional[int] = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : int = self.text_seq_length + self.image_seq_length
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Union[str, Any] = bbox[i, j, 3]
_lowerCAmelCase : Any = bbox[i, j, 1]
_lowerCAmelCase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : Tuple = bbox[i, j, 2]
_lowerCAmelCase : Any = bbox[i, j, 0]
_lowerCAmelCase : Any = t
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
_lowerCAmelCase : Any = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = LayoutLMvaModel(config=_A )
model.to(_A )
model.eval()
# text + image
_lowerCAmelCase : str = model(_A ,pixel_values=_A )
_lowerCAmelCase : Tuple = model(
_A ,bbox=_A ,pixel_values=_A ,attention_mask=_A ,token_type_ids=_A )
_lowerCAmelCase : Union[str, Any] = model(_A ,bbox=_A ,pixel_values=_A ,token_type_ids=_A )
_lowerCAmelCase : Optional[Any] = model(_A ,bbox=_A ,pixel_values=_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase : Union[str, Any] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase : str = model(pixel_values=_A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Dict = LayoutLMvaForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(
_A ,bbox=_A ,pixel_values=_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.num_labels
_lowerCAmelCase : int = LayoutLMvaForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(
_A ,bbox=_A ,pixel_values=_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = LayoutLMvaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
_A ,bbox=_A ,pixel_values=_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
return True
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = LayoutLMvaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ,_A ,_A ,_A=False ):
'''simple docstring'''
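        # Expand the inputs along a num_choices dimension for multiple-choice models and, when requested,
        # attach dummy labels with the right shape for each task head.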
_lowerCAmelCase : Dict = copy.deepcopy(_A )
if model_class in get_values(_A ):
_lowerCAmelCase : int = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(_A ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
_lowerCAmelCase : int = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=_A )
elif model_class in get_values(_A ):
_lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
_lowerCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
elif model_class in [
*get_values(_A ),
]:
_lowerCAmelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
elif model_class in [
*get_values(_A ),
]:
_lowerCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=_A ,)
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Dict = type
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = LayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(_A )
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : Tuple = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(images=_A ,return_tensors='pt' ).pixel_values.to(_A )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2]] )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase : Union[str, Any] = model(
input_ids=input_ids.to(_A ) ,bbox=bbox.to(_A ) ,pixel_values=pixel_values.to(_A ) ,)
# verify the logits
_lowerCAmelCase : Tuple = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape ,_A )
_lowerCAmelCase : str = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(_A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_A ,atol=1E-4 ) )
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
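        # Run a short 10-step sampling loop with a dummy model and return the final sample.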
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
                    assert not torch.isnan(_A ).any(), "Samples contain NaN values"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = "bit"
_UpperCAmelCase = ["preactivation", "bottleneck"]
_UpperCAmelCase = ["SAME", "VALID"]
def __init__( self ,_A=3 ,_A=64 ,_A=[256, 512, 1024, 2048] ,_A=[3, 4, 6, 3] ,_A="preactivation" ,_A="relu" ,_A=None ,_A=32 ,_A=0.0 ,_A=False ,_A=32 ,_A=1 ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCAmelCase : Dict = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : str = embedding_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : Dict = layer_type
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Tuple = global_padding
_lowerCAmelCase : Dict = num_groups
_lowerCAmelCase : Optional[Any] = drop_path_rate
_lowerCAmelCase : Tuple = embedding_dynamic_padding
_lowerCAmelCase : Union[str, Any] = output_stride
_lowerCAmelCase : Dict = width_factor
_lowerCAmelCase : List[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(_A ) + 1 )]
_lowerCAmelCase, _lowerCAmelCase : Dict = get_aligned_output_features_output_indices(
out_features=_A ,out_indices=_A ,stage_names=self.stage_names )
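# Illustrative usage (a sketch; the keyword names follow the attributes set in __init__ and match the
# real transformers BitConfig API, and the values are assumptions, not from the original file):
#   config = BitConfig(depths=[2, 2, 4, 2], layer_type='bottleneck', global_padding='SAME')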
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
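    # Map a single OpenAI Jukebox weight name onto the Transformers naming scheme.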
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
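    # The regex patterns below match VQ-VAE encoder/decoder blocks and prior-conditioner blocks
    # so their weights can be renamed to the Transformers layout.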
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "char"
_UpperCAmelCase = "bpe"
_UpperCAmelCase = "wp"
_lowerCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
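# The processor below runs all three decoders and keeps, per sample, the string with the highest confidence.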
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["image_processor", "char_tokenizer"]
_UpperCAmelCase = "ViTImageProcessor"
_UpperCAmelCase = "MgpstrTokenizer"
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,_A ,)
_lowerCAmelCase : Union[str, Any] = kwargs.pop('feature_extractor' )
_lowerCAmelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
_lowerCAmelCase : List[Any] = tokenizer
_lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('gpt2' )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(_A ,_A )
def __call__( self ,_A=None ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_lowerCAmelCase : Union[str, Any] = self.image_processor(_A ,return_tensors=_A ,**_A )
if text is not None:
_lowerCAmelCase : Tuple = self.char_tokenizer(_A ,return_tensors=_A ,**_A )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCAmelCase : Any = encodings['input_ids']
return inputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = sequences
_lowerCAmelCase : List[Any] = char_preds.size(0 )
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._decode_helper(_A ,'char' )
_lowerCAmelCase, _lowerCAmelCase : Tuple = self._decode_helper(_A ,'bpe' )
_lowerCAmelCase, _lowerCAmelCase : int = self._decode_helper(_A ,'wp' )
_lowerCAmelCase : Dict = []
_lowerCAmelCase : int = []
for i in range(_A ):
_lowerCAmelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCAmelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCAmelCase : Optional[Any] = scores.index(max(_A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : Any = final_strs
_lowerCAmelCase : Optional[Any] = final_scores
_lowerCAmelCase : Optional[int] = char_strs
_lowerCAmelCase : int = bpe_strs
_lowerCAmelCase : Any = wp_strs
return out
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCAmelCase : int = self.char_decode
_lowerCAmelCase : Any = 1
_lowerCAmelCase : int = '[s]'
elif format == DecodeType.BPE:
_lowerCAmelCase : Optional[Any] = self.bpe_decode
_lowerCAmelCase : Optional[int] = 2
_lowerCAmelCase : Optional[Any] = '#'
elif format == DecodeType.WORDPIECE:
_lowerCAmelCase : str = self.wp_decode
_lowerCAmelCase : Any = 102
_lowerCAmelCase : Any = '[SEP]'
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCAmelCase, _lowerCAmelCase : str = [], []
_lowerCAmelCase : Union[str, Any] = pred_logits.size(0 )
_lowerCAmelCase : List[str] = pred_logits.size(1 )
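        # Greedy decoding: take the top-1 token at each position, then drop the first position
        # before converting ids back to strings.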
_lowerCAmelCase, _lowerCAmelCase : int = pred_logits.topk(1 ,dim=-1 ,largest=_A ,sorted=_A )
_lowerCAmelCase : Tuple = preds_index.view(-1 ,_A )[:, 1:]
_lowerCAmelCase : Union[str, Any] = decoder(_A )
_lowerCAmelCase, _lowerCAmelCase : Any = torch.nn.functional.softmax(_A ,dim=2 ).max(dim=2 )
_lowerCAmelCase : Tuple = preds_max_prob[:, 1:]
for index in range(_A ):
_lowerCAmelCase : Any = preds_str[index].find(_A )
_lowerCAmelCase : Dict = preds_str[index][:pred_eos]
_lowerCAmelCase : List[Any] = preds_index[index].cpu().tolist()
_lowerCAmelCase : Dict = pred_index.index(_A ) if eos_token in pred_index else -1
_lowerCAmelCase : int = preds_max_prob[index][: pred_eos_index + 1]
_lowerCAmelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_A )
conf_scores.append(_A )
return dec_strs, conf_scores
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = [seq.replace(' ' ,'' ) for seq in self.char_tokenizer.batch_decode(_A )]
return decode_strs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [seq.replace(' ' ,'' ) for seq in self.wp_tokenizer.batch_decode(_A )]
return decode_strs
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
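    # The profile data is embedded as a JSON object inside a <script> tag; slice from '{"config"'
    # to the trailing semicolon and parse it.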
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
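    # Count only the parameters that require gradients.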
_lowerCAmelCase : int = filter(lambda _lowerCamelCase : p.requires_grad , model.parameters() )
_lowerCAmelCase : Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_lowerCAmelCase = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if metric == "rouge2":
_lowerCAmelCase : Dict = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_lowerCAmelCase : int = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_lowerCAmelCase : List[str] = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
_lowerCAmelCase : Union[str, Any] = ModelCheckpoint(
dirpath=_lowerCamelCase , filename=_lowerCamelCase , monitor=f"""val_{metric}""" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=_lowerCamelCase , verbose=_lowerCamelCase , )
class __UpperCamelCase ( pl.Callback ):
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=True ):
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_lowerCAmelCase : Union[str, Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_lowerCAmelCase : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
_lowerCAmelCase : str = od / 'test_results.txt'
_lowerCAmelCase : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_lowerCAmelCase : Union[str, Any] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_lowerCAmelCase : Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A ,'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
_lowerCAmelCase : Union[str, Any] = metrics[key]
if isinstance(_A ,torch.Tensor ):
_lowerCAmelCase : int = val.item()
_lowerCAmelCase : Optional[Any] = F"""{key}: {val:.6f}\n"""
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
_lowerCAmelCase : Optional[Any] = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
try:
_lowerCAmelCase : Any = pl_module.model.model.num_parameters()
except AttributeError:
_lowerCAmelCase : Any = pl_module.model.num_parameters()
_lowerCAmelCase : Optional[int] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_A ,_A ,'test' )
@rank_zero_only
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
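# Usage sketch (hedged; `model` and `output_dir` are placeholders, and the
# class above is the logging callback, originally `Seq2SeqLoggingCallback`):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#           __UpperCamelCase(),
#       ]
#   )
#   trainer.fit(model)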
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
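# Usage sketch (hedged; requires the public checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above and a network/cache to fetch it):
#
#   tok = __UpperCamelCase.from_pretrained("xlnet-base-cased")
#   ids = tok("Hello world")["input_ids"]
#   # per build_inputs_with_special_tokens above, XLNet appends its
#   # special tokens at the END of the sequence: ... <sep> <cls>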
| 16 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_lowerCAmelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCAmelCase = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one).
The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level.
The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system.
This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment.
Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_lowerCAmelCase = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] ,)
def __lowerCamelCase ( self ,_A=None ,_A=None ,_A=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(_A ,_A )["wer"]
else:
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Any = 0
for prediction, reference in zip(_A ,_A ):
_lowerCAmelCase : List[str] = compute_measures(_A ,_A )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
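# Worked check of the docstring example above (iterative mode):
#   "this is the prediction"   vs "this is the reference": 1 substitution, 4 reference words
#   "there is an other sample" vs "there is another one":  2 substitutions + 1 insertion, 4 reference words
#   WER = (1 + 3) / (4 + 4) = 0.5, matching the documented output.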
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
        self.data = _A
# Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( _A ):
        '''simple docstring'''
        padding = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' ,(len(_A ) * 8) )
        return _A + padding + big_endian_integer
    def final_hash( self ):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' ,block ) )
            # add 48 zeroed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
    def test_match_hashes( self ):
'''simple docstring'''
import hashlib
        msg = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main():
'''simple docstring'''
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
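# Known-answer sanity check: SHA-256 of the empty byte string.
if __name__ == "__main__":
    assert (
        SHAaaa(b"").hash
        == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    )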
| 16 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
_UpperCAmelCase = None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
_UpperCAmelCase = PandasConfig
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files ,(str, list, tuple) ):
            files = data_files
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={'files': files} ) )
return splits
    def _cast_table( self ,_A ):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
            pa_table = table_cast(_A ,self.config.features.arrow_schema )
return pa_table
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(_A ) ):
            with open(file ,'rb' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
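# Usage sketch (hedged): registered as the packaged "pandas" loader, this
# builder loads pickled DataFrames directly; the path below is hypothetical.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})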
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
    def __init__( self ,key = None ):
        '''simple docstring'''
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent( self ,i ):
        '''simple docstring'''
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self ,i ):
        '''simple docstring'''
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self ,i ):
        '''simple docstring'''
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self ,i ,j ):
        '''simple docstring'''
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self ,i ,j ):
        '''simple docstring'''
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self ,i ):
        '''simple docstring'''
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left ,valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right ,valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self ,index ):
        '''simple docstring'''
        parent = self._parent(index )
        while parent is not None and not self._cmp(index ,parent ):
            self._swap(index ,parent )
            index, parent = parent, self._parent(parent )
    def _heapify_down( self ,index ):
        '''simple docstring'''
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index ,valid_parent )
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self ,item ,item_value ):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self ,item ):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self ,item ,item_value ):
        '''simple docstring'''
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self ):
        '''simple docstring'''
        return self.arr[0] if self.size else None
    def extract_top( self ):
        '''simple docstring'''
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
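# Usage sketch for the heap above (the class name is the module's own):
# a negated key turns the default max-heap ordering into a min-heap.
if __name__ == "__main__":
    heap = __UpperCamelCase(key=lambda x: -x)
    heap.insert_item('a', 4)
    heap.insert_item('b', 9)
    heap.insert_item('c', 1)
    assert heap.get_top()[0] == 'c'     # smallest value surfaces first
    assert heap.extract_top()[0] == 'c'
    assert heap.get_top()[0] == 'a'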
| 16 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( _lowerCamelCase ):
    '''simple docstring'''
    t = int(_lowerCamelCase )
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def html_progress_bar( value , total , prefix , label , width=300 ):
'''simple docstring'''
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table( items ):
    '''simple docstring'''
    html_code = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt , float ) else str(elt )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __UpperCamelCase :
_UpperCAmelCase = 5
_UpperCAmelCase = 0.2
def __init__( self ,_A ,_A = None ,_A = True ,_A = None ,_A = 300 ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = total
_lowerCAmelCase : str = '' if prefix is None else prefix
_lowerCAmelCase : Optional[int] = leave
_lowerCAmelCase : int = parent
_lowerCAmelCase : List[Any] = width
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[Any] = None
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = value
if comment is not None:
_lowerCAmelCase : Optional[Any] = comment
if self.last_value is None:
_lowerCAmelCase : Optional[Any] = time.time()
_lowerCAmelCase : Dict = value
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = self.warmup
_lowerCAmelCase : Union[str, Any] = 1
self.update_bar(_A )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_lowerCAmelCase : str = time.time()
_lowerCAmelCase : int = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowerCAmelCase : int = self.elapsed_time / (value - self.start_value)
else:
_lowerCAmelCase : List[str] = None
if value >= self.total:
_lowerCAmelCase : Tuple = self.total
_lowerCAmelCase : Optional[int] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowerCAmelCase : int = self.average_time_per_item * (self.total - value)
self.update_bar(_A )
_lowerCAmelCase : Union[str, Any] = value
_lowerCAmelCase : Optional[Any] = current_time
if self.average_time_per_item is None:
_lowerCAmelCase : Any = 1
else:
_lowerCAmelCase : Optional[int] = max(int(self.update_every / self.average_time_per_item ) ,1 )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ' ' * (len(str(self.total ) ) - len(str(_A ) )) + str(_A )
if self.elapsed_time is None:
_lowerCAmelCase : Any = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
_lowerCAmelCase : List[Any] = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
_lowerCAmelCase : List[Any] = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowerCAmelCase : Optional[int] = disp.display(disp.HTML(self.html_code ) ,display_id=_A )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A=None ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Optional[Any] = None if column_names is None else [column_names]
_lowerCAmelCase : Optional[Any] = None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowerCAmelCase : Union[str, Any] = disp.display(disp.HTML(self.html_code ) ,display_id=_A )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.inner_table is None:
_lowerCAmelCase : Dict = [list(values.keys() ), list(values.values() )]
else:
_lowerCAmelCase : Union[str, Any] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_A )
_lowerCAmelCase : List[str] = columns
self.inner_table.append([values[c] for c in columns] )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=300 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = NotebookProgressBar(_A ,prefix=_A ,parent=self ,width=_A )
return self.child_bar
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = None
self.display()
class __UpperCamelCase ( a__ ):
def __init__( self ):
'''simple docstring'''
_lowerCAmelCase : int = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : List[Any] = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : int = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_lowerCAmelCase : List[str] = NotebookTrainingTracker(state.max_steps ,_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 ,comment=F"""Epoch {epoch}/{state.num_train_epochs}""" ,force_update=self._force_next_update ,)
_lowerCAmelCase : int = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if not has_length(_A ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowerCAmelCase : str = self.training_tracker.add_child(len(_A ) )
else:
_lowerCAmelCase : str = NotebookProgressBar(len(_A ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowerCAmelCase : Tuple = None
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowerCAmelCase : str = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
_lowerCAmelCase : Dict = state.global_step
self.training_tracker.write_line(_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if self.training_tracker is not None:
_lowerCAmelCase : Any = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
_lowerCAmelCase : str = log['loss']
break
if self.first_column == "Epoch":
_lowerCAmelCase : Dict = int(state.epoch )
else:
_lowerCAmelCase : Optional[Any] = state.global_step
_lowerCAmelCase : Optional[Any] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
_lowerCAmelCase : List[Any] = re.sub(r'\_loss$' ,'' ,_A )
_lowerCAmelCase : Dict = metrics.pop('total_flos' ,_A )
_lowerCAmelCase : int = metrics.pop('epoch' ,_A )
_lowerCAmelCase : str = metrics.pop(F"""{metric_key_prefix}_runtime""" ,_A )
_lowerCAmelCase : Dict = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" ,_A )
_lowerCAmelCase : Tuple = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" ,_A )
_lowerCAmelCase : int = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" ,_A )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
_lowerCAmelCase : Optional[int] = v
else:
_lowerCAmelCase : Tuple = k.split('_' )
_lowerCAmelCase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]] )
_lowerCAmelCase : str = v
self.training_tracker.write_line(_A )
self.training_tracker.remove_child()
_lowerCAmelCase : Tuple = None
# Evaluation takes a long time so we should force the next update.
_lowerCAmelCase : Union[str, Any] = True
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" ,force_update=_A )
_lowerCAmelCase : List[Any] = None
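# Usage sketch (hedged): Trainer normally installs the notebook callback
# automatically when running in Jupyter, but it can also be passed
# explicitly; `model` and `training_args` are placeholders, and the last
# class above is the callback (originally `NotebookProgressCallback`).
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args,
#                     callbacks=[__UpperCamelCase()])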
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
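# Shape sketch (hedged): with the defaults above (32 heads x 64 dims -> inner
# width 2048; 77 embeddings + 4 additional + 1 "prd" token), the forward pass
# maps a noisy CLIP image embedding plus text conditioning to a predicted
# image embedding. Argument names below are assumptions:
#
#   prior = <the class above>()
#   out = prior(hidden_states, timestep, proj_embedding,
#               encoder_hidden_states=text_states)
#   out.predicted_image_embedding   # (batch, embedding_dim)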
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCamelCase__ ( matrix ):
    '''simple docstring'''
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
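# Minimal sanity check: the inverse of a diagonal matrix inverts each
# diagonal entry (the function name is the module's own).
if __name__ == "__main__":
    assert lowerCamelCase__([[2.0, 0.0], [0.0, 4.0]]) == [[0.5, 0.0], [0.0, 0.25]]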
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    def __init__( self ,features=None ,device=None ,**jnp_array_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device ,Device ):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                'is not serializable with either `pickle` or `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        self.device = device if isinstance(device ,str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F"""Device with string identifier {self.device} not listed among the available """
                F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                F"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str( ):
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self ,column ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column ,list ) and column:
            if all(
                isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column ,axis=0 )
        return column
    def _tensorize( self ,value ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value ,(str, bytes, type(None )) ):
            return value
        elif isinstance(value ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value ,PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value ,**{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self ,data_struct ):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct ,torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct ,'__array__' ) and not isinstance(data_struct ,jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self ,data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize ,data_struct ,map_list=False )
    def format_row( self ,pa_table ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self ,pa_table ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column ,pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self ,pa_table ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
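# Usage sketch (hedged): `Dataset.with_format("jax")` is the public entry
# point that routes through this formatter.
#
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   batch = ds[:2]   # column values come back as jax.Array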
| 16 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    def assertListAlmostEqual( self ,list1 ,list2 ,tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) ,len(list2 ) )
        for a, b in zip(list1 ,list2 ):
            self.assertAlmostEqual(a ,b ,delta=tol )
    def test_adam_w( self ):
'''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w ,target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
    def test_adafactor( self ):
'''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,beta1=None ,weight_decay=0.0 ,relative_step=False ,scale_parameter=False ,warmup_init=False ,)
        for _ in range(1000 ):
            loss = criterion(w ,target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
_UpperCAmelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
_UpperCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
_UpperCAmelCase = 10
    def assertListAlmostEqual( self ,list1 ,list2 ,tol ,msg=None ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) ,len(list2 ) )
        for a, b in zip(list1 ,list2 ):
            self.assertAlmostEqual(a ,b ,delta=tol ,msg=msg )
    def test_schedulers( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_lowerCAmelCase : Optional[Any] = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer ,**kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
            lrs_1 = unwrap_schedule(scheduler ,self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 ,expected_learning_rates ,tol=1E-2 ,msg=F"""failed for {scheduler_func} in normal scheduler""" ,)
            scheduler = scheduler_func(self.optimizer ,**kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler ,self.num_steps )
            self.assertListEqual(lrs_1 ,lrs_2 ,msg=F"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper:
    def __init__( self ,fn ):
        '''simple docstring'''
        self.fn = fn
    def __call__( self ,*args ,**kwargs ):
        '''simple docstring'''
        return self.fn(*args ,**kwargs )
    @classmethod
    def wrap_scheduler( cls ,scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls ,scheduler.lr_lambdas ) )
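# The pattern these tests exercise, in miniature (hedged sketch; `model`
# and `loader` are placeholders):
#
#   optimizer = AdamW(model.parameters(), lr=1e-3)
#   scheduler = get_linear_schedule_with_warmup(
#       optimizer, num_warmup_steps=2, num_training_steps=10)
#   for batch in loader:
#       ...                      # forward / backward
#       optimizer.step()
#       scheduler.step()         # one LR update per optimizer step
#       optimizer.zero_grad()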
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
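        # Presumably 50 denoising steps for a DDIM scheduler, otherwise the full 1000 DDPM steps.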
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
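            # Rescale 8-bit pixel values from [0, 255] to the [-1, 1] range expected by the UNet.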
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
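            # Convert mask durations from seconds to spectrogram columns (one column per hop_length samples).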
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
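        # Map images from [-1, 1] back to [0, 1], then to 8-bit pixels, PIL images and finally audio.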
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
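    # Spherical linear interpolation between two same-shape tensors (slerp in the original
    # diffusers API); typically used to blend two initial noise tensors.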
@staticmethod
    def __lowerCamelCase ( xa ,xb ,alpha ):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa ) ,torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 16 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowerCAmelCase = """bart"""
_lowerCAmelCase = True
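# The two flags above select the answer-generation model ("bart") and whether to load the dense GPU retrieval index.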
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
_lowerCAmelCase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_lowerCAmelCase : int = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_lowerCAmelCase : Union[str, Any] = qar_model.eval()
else:
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
_lowerCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_lowerCAmelCase : List[str] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_lowerCAmelCase : List[str] = sas_model.eval()
else:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
_lowerCAmelCase : Dict = faiss.StandardGpuResources()
_lowerCAmelCase : List[str] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_lowerCAmelCase : Dict = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
_lowerCAmelCase : Tuple = faiss.IndexFlatIP(128 )
_lowerCAmelCase : List[Any] = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
_lowerCAmelCase, _lowerCAmelCase : List[str] = (None, None)
_lowerCAmelCase : Tuple = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_lowerCAmelCase : Optional[int] = elia['train_eli5']
_lowerCAmelCase : Union[str, Any] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
_lowerCAmelCase : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = load_indexes()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = load_models()
_lowerCAmelCase , _lowerCAmelCase = load_train_data()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=10 ):
'''simple docstring'''
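    # Embed the question and look up its nearest neighbours among the precomputed ELI5 training-question embeddings.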
_lowerCAmelCase : List[str] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase, _lowerCAmelCase : Dict = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ):
'''simple docstring'''
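    # Retrieve supporting passages (dense FAISS or sparse ElasticSearch) and build the "question: ... context: ..." seq2seq input.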
if source == "none":
_lowerCAmelCase, _lowerCAmelCase : str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowerCAmelCase, _lowerCAmelCase : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name='english_wiki40b_snippets_100w' , n_results=_lowerCamelCase , )
_lowerCAmelCase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowerCAmelCase : Any = 'question: {} context: {}'.format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
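# Streamlit cannot hash torch tensors or tokenizers, so hash_funcs maps them to None and they are ignored in the cache key.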
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCAmelCase = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCAmelCase = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowerCAmelCase = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowerCAmelCase = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_lowerCAmelCase = st.sidebar.checkbox("""Demo options""")
if demo_options:
_lowerCAmelCase = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_lowerCAmelCase = action_list.index(action_st)
_lowerCAmelCase = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_lowerCAmelCase = show_type == """Show full text of passages"""
else:
_lowerCAmelCase = 3
_lowerCAmelCase = True
_lowerCAmelCase = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCAmelCase = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
_lowerCAmelCase = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCAmelCase = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCAmelCase = """wiki40b"""
_lowerCAmelCase = """dense"""
_lowerCAmelCase = """beam"""
_lowerCAmelCase = 2
_lowerCAmelCase = 6_4
_lowerCAmelCase = 2_5_6
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCAmelCase = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
    **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_lowerCAmelCase = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCAmelCase = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_lowerCAmelCase = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_lowerCAmelCase = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCAmelCase = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCAmelCase = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCAmelCase = None
# start main text
_lowerCAmelCase = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_lowerCAmelCase = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowerCAmelCase = st.text_input("""Enter your question here:""", """""")
else:
_lowerCAmelCase = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_lowerCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowerCAmelCase = support_list[:1_0]
_lowerCAmelCase = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_lowerCAmelCase , _lowerCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_lowerCAmelCase , _lowerCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCAmelCase = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCAmelCase = res[1].strip()
if sec_titles == "":
_lowerCAmelCase = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCAmelCase = sec_titles.split(""" & """)
_lowerCAmelCase = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_lowerCAmelCase = find_nearest_training(question)
_lowerCAmelCase = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_lowerCAmelCase = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCAmelCase = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
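    # Collect all adjacent symbol pairs in the word; these are the candidates for the next BPE merge.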
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
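        # Append the end-of-word marker to the last symbol so merges can distinguish word-final characters.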
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
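        # Strip the end-of-word marker again and re-attach the "@@ " continuation marker used by this BPE variant.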
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
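            # Re-split pieces that end in a comma preceded by a digit so numbers tokenize consistently (an XLNet preprocessing quirk).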
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
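        # XLNet appends <sep> and <cls> at the end of the sequence, unlike BERT which puts [CLS] first.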
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
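        # Segment ids: 0 for the first sequence, 1 for the second, and 2 for the trailing <cls> token.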
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
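        # Outer product of the query and key padding masks, giving a (batch, 1, query_len, key_len) attention mask.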
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
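        # T5-style RMSNorm: scale by 1/sqrt(mean(x^2) + eps), with no mean subtraction and no bias.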
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
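        # tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).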
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
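        # FiLM: predict per-channel (scale, shift) from the conditioning embedding and apply x * (1 + scale) + shift.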
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16 | 1 |
"""simple docstring"""
import qiskit
def lowerCamelCase__ ( _lowerCamelCase = 2 ):
'''simple docstring'''
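    # Build an n-qubit GHZ state: H on qubit 0, then a CNOT chain; ideally only the all-zeros and all-ones bitstrings are measured.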
_lowerCAmelCase : List[str] = qubits
# Using Aer's simulator
_lowerCAmelCase : List[str] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
_lowerCAmelCase : Optional[int] = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , _lowerCamelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , _lowerCamelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_lowerCamelCase ) ) , list(range(_lowerCamelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
_lowerCAmelCase : List[Any] = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1000 )
return job.result().get_counts(_lowerCamelCase )
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
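            # hidden_states holds the embedding output plus one tensor per stage, hence expected_num_stages + 1 below.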
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = MgpstrTokenizer
_UpperCAmelCase = False
_UpperCAmelCase = {}
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCAmelCase : List[Any] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_lowerCAmelCase : List[str] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
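        # MGP-STR is a character-level tokenizer over lower-case ASCII, so this word should round-trip unchanged.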
_lowerCAmelCase : Optional[Any] = 'tester'
_lowerCAmelCase : Tuple = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : List[str] = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
_lowerCAmelCase : Dict = tokenizer.encode([special_token] ,add_special_tokens=_A )
self.assertEqual(len(_A ) ,1 )
_lowerCAmelCase : Dict = tokenizer.decode(_A ,skip_special_tokens=_A )
self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.get_input_output_texts(_A )
_lowerCAmelCase : Dict = tokenizer.tokenize(_A )
_lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(_A )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_A ,add_special_tokens=_A )
self.assertListEqual(_A ,_A )
_lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(_A )
self.assertNotEqual(len(_A ) ,0 )
_lowerCAmelCase : int = tokenizer.decode(_A )
self.assertIsInstance(_A ,_A )
self.assertEqual(text_a.replace(' ' ,'' ) ,_A )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
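    # Solve the linear system by Gaussian elimination with partial pivoting on the augmented matrix [A | b].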
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
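    # Build a Vandermonde system so the returned function is the unique polynomial of degree size - 1 through the points.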
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
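    # Project Euler 101: for each truncated interpolant, find its first incorrect term (FIT) and sum those values.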
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "sew-d"
def __init__( self ,_A=32 ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A=2 ,_A=512 ,_A=256 ,_A=True ,_A=True ,_A=("p2c", "c2p") ,_A="layer_norm" ,_A="gelu_python" ,_A=0.1 ,_A=0.1 ,_A=0.1 ,_A=0.0 ,_A=0.1 ,_A=0.0_2 ,_A=1E-7 ,_A=1E-5 ,_A="group" ,_A="gelu" ,_A=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,_A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,_A=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,_A=False ,_A=128 ,_A=16 ,_A=True ,_A=0.0_5 ,_A=10 ,_A=2 ,_A=0.0 ,_A=10 ,_A=0 ,_A="mean" ,_A=False ,_A=False ,_A=256 ,_A=0 ,_A=1 ,_A=2 ,**_A ,):
'''simple docstring'''
super().__init__(**_A ,pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A )
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Any = feat_extract_norm
_lowerCAmelCase : Optional[int] = feat_extract_activation
_lowerCAmelCase : Optional[int] = list(_A )
_lowerCAmelCase : List[Any] = list(_A )
_lowerCAmelCase : Tuple = list(_A )
_lowerCAmelCase : Dict = conv_bias
_lowerCAmelCase : Optional[Any] = num_conv_pos_embeddings
_lowerCAmelCase : str = num_conv_pos_embedding_groups
_lowerCAmelCase : Optional[Any] = len(self.conv_dim )
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Dict = squeeze_factor
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[int] = position_buckets
_lowerCAmelCase : Union[str, Any] = share_att_key
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : int = norm_rel_ebd
_lowerCAmelCase : str = list(_A )
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[Any] = hidden_dropout
_lowerCAmelCase : Optional[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : int = feat_proj_dropout
_lowerCAmelCase : Union[str, Any] = final_dropout
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : str = feature_layer_norm_eps
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Tuple = apply_spec_augment
_lowerCAmelCase : List[Any] = mask_time_prob
_lowerCAmelCase : str = mask_time_length
_lowerCAmelCase : Union[str, Any] = mask_time_min_masks
_lowerCAmelCase : Any = mask_feature_prob
_lowerCAmelCase : List[str] = mask_feature_length
_lowerCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Optional[Any] = ctc_loss_reduction
_lowerCAmelCase : List[str] = ctc_zero_infinity
# sequence classification
_lowerCAmelCase : Union[str, Any] = use_weighted_layer_sum
_lowerCAmelCase : str = classifier_proj_size
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
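# Illustrative behaviour of the word check above: is_chinese('中国') returns 1
# because every character falls in a CJK block, while is_chinese('hello')
# returns 0 at the first non-CJK character.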
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
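# A worked illustration of the re-labelling above: given the BERT tokens
# ['中', '国', '人'] and the word set {'中国'}, the matched suffix character is
# prefixed with '##', yielding ['中', '##国', '人'], so downstream whole-word
# masking can treat '中国' as a single word.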
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
    # We only save the positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowerCAmelCase : Dict = parser.parse_args()
return args.f
class __UpperCamelCase ( a__ ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = logging.StreamHandler(sys.stdout )
logger.addHandler(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(_A ,'argv' ,_A ):
_lowerCAmelCase : Any = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_A ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(_A )
_lowerCAmelCase : Optional[int] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(_A )
_lowerCAmelCase : Any = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(_A )
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 16 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
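# A self-contained round-trip sketch using the standard library, which the
# Ascii85 helpers above mirror (assuming baseaa behaves like base64):
#
#     >>> import base64
#     >>> base64.a85encode('hello'.encode('utf-8'))
#     b'BOu!rDZ'
#     >>> base64.a85decode(b'BOu!rDZ').decode('utf-8')
#     'hello'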
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = np.shape(_lowerCamelCase )
if rows != columns:
_lowerCAmelCase : Dict = (
            '\'table\' has to be a square-shaped array but got a '
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = np.zeros((rows, columns) )
_lowerCAmelCase : Optional[Any] = np.zeros((rows, columns) )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
_lowerCAmelCase : Optional[Any] = (table[i][j] - total) / upper[j][j]
_lowerCAmelCase : Union[str, Any] = 1
for j in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = table[i][j] - total
return lower, upper
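# A worked 2x2 example of the Doolittle factorisation above (illustrative
# only): for table = [[4.0, 3.0], [6.0, 3.0]] the intended result is
#     lower = [[1.0, 0.0], [1.5, 1.0]]   (unit diagonal)
#     upper = [[4.0, 3.0], [0.0, -1.5]]
# and lower @ upper reproduces the input matrix.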
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
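    # Illustrative mask from the method above for a pair (A, B) with
    # len(A) = 2 and len(B) = 3: [CLS] A [SEP] contributes four 0s and
    # B [SEP] four 1s, i.e. [0, 0, 0, 0, 1, 1, 1, 1].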
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
debug_launcher(test_script.main )
def __lowerCamelCase ( self ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=_A ,)
assert hasattr(self ,'env' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
_lowerCAmelCase : Union[str, Any] = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=_A ,instance_count=_A ,instance_type=self.instance_type ,debugger_hook_config=_A ,hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=_A ,py_version='py36' ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,_A )
| 16 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
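# The backtracking above explores a state-space tree: at depth `index` every
# unused element is appended, recursed on, then popped. For [3, 1, 2, 4] this
# prints all 4! = 24 permutations, starting with [3, 1, 2, 4] then [3, 1, 4, 2].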
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
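# A hedged usage sketch of the helper above, assuming the upstream accelerate
# name get_logger(name, log_level=None); the keyword arguments shown are the
# ones handled by MultiProcessAdapter.log:
#
#     logger = get_logger(__name__, log_level='INFO')
#     logger.info('emitted on the main process only')
#     logger.info('emitted once per process, in rank order', main_process_only=False, in_order=True)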
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light C,
# and use the value of pi from math in the function below.
_lowerCAmelCase = 1.054_571_817E-34 # unit of ℏ : J * s
_lowerCAmelCase = 3E8 # unit of c : m * s^-1
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_lowerCAmelCase : Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCAmelCase : Any = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCAmelCase : List[str] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
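# Illustrative positional call of the helper above (intent mirrors the
# upstream casimir_force(force, area, distance); exactly one argument is 0
# and the returned dict names the solved quantity):
#
#     lowerCamelCase__(0, 4, 0.03)  # solves F = ħ·c·π²·A / (240·d⁴) for the force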
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
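    # The loop above is a greedy non-maximum suppression over spans: candidates
    # are visited in descending score order and kept only if they neither
    # contain nor are nested inside an already chosen span (partial overlaps
    # are allowed), stopping once `top_spans` intervals are collected.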
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
import pytest
_lowerCAmelCase = """__dummy_dataset1__"""
_lowerCAmelCase = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def lowerCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowerCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = dataset_loading_script_name
_lowerCAmelCase : Union[str, Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=_lowerCamelCase )
_lowerCAmelCase : Any = script_dir / f"""{script_name}.py"""
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
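# Quick-start sketch for the pipeline exercised above. The checkpoint id is
# the one used in the slow tests; everything else is illustrative. Left
# uncalled because it downloads model weights at runtime.
def _demo_dance_diffusion():
    pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
    output = pipe(num_inference_steps=100, audio_length_in_s=4.096)
    return output.audios[0]  # numpy array of shape (channels, samples)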
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
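# Lazy-import sketch: with the structure above, an import such as
#   from transformers.models.blenderbot_small import BlenderbotSmallTokenizer
# is resolved by _LazyModule, which loads tokenization_blenderbot_small only
# on first attribute access, keeping `import transformers` cheap.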
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
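# Scheduler-swap sketch grounded in the `from_config` round-trips exercised
# above: UniPC, DPM-Solver and DEIS expose compatible configs, so one can be
# substituted for another without retraining (illustrative helper, not a test).
def _demo_swap_scheduler():
    src = UniPCMultistepScheduler(num_train_timesteps=1000)
    return DPMSolverMultistepScheduler.from_config(src.config)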
| 16 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' ,_A ,)
super().__init__(*_A ,**_A )
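# Migration sketch implied by the deprecation warning above (the checkpoint id
# is an assumption for illustration):
#   from transformers import FlavaImageProcessor
#   image_processor = FlavaImageProcessor.from_pretrained('facebook/flava-full')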
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
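# Hand-traced examples for the renaming function above (hypothetical keys,
# each chosen to hit a single branch; `replace_key` is the name used at the
# call site below):
#   'priors.0.y_emb.weight'             -> 'priors.0.metadata_embedding.weight'
#   'vqvae.bottleneck.level_blocks.0.k' -> 'vqvae.bottleneck.level_blocks.0.codebook'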
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = len([g for position, g in enumerate(_lowerCamelCase ) if g == main_target[position]] )
return (item, float(_lowerCamelCase ))
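# Worked example for the fitness function above: matches are counted
# position-wise against the target, so for target 'cat' the candidate 'car'
# scores 2.0 ('c' and 'a' match) while 'dog' scores 0.0.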
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = random.randint(0 , len(_lowerCamelCase ) - 1 )
_lowerCAmelCase : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
_lowerCAmelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_lowerCAmelCase : List[Any] = random.choice(_lowerCamelCase )
return "".join(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_lowerCAmelCase : List[str] = int(parent_a[1] * 100 ) + 1
_lowerCAmelCase : List[Any] = 10 if child_n >= 10 else child_n
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = population_score[random.randint(0 , _lowerCamelCase )][0]
_lowerCAmelCase, _lowerCAmelCase : int = crossover(parent_a[0] , _lowerCamelCase )
# Append new string to the population list.
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
return pop
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True ):
'''simple docstring'''
if N_POPULATION < N_SELECTED:
_lowerCAmelCase : Tuple = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_lowerCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
_lowerCAmelCase : Optional[int] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_lowerCAmelCase : Any = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_lowerCamelCase )
# Generate random starting population.
_lowerCAmelCase : List[Any] = []
for _ in range(_lowerCamelCase ):
population.append(''.join([random.choice(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
_lowerCAmelCase, _lowerCAmelCase : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_lowerCAmelCase : Any = [evaluate(_lowerCamelCase , _lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
_lowerCAmelCase : Tuple = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] , reverse=_lowerCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCamelCase )
# Normalize population score to be between 0 and 1.
_lowerCAmelCase : int = [
(item, score / len(_lowerCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCamelCase ):
population.extend(select(population_score[int(_lowerCamelCase )] , _lowerCamelCase , _lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
    genes_list = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
    generation , population , target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
    def __init__( self ,username ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
        _lowerCAmelCase : str = requests.get(self.url ,headers=headers ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A )
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = 50 ,_A = None ,_A = "pil" ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = self.unet.config.sample_size
_lowerCAmelCase : List[Any] = (batch_size, 3, img_size, img_size)
_lowerCAmelCase : List[str] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase : Optional[Any] = randn_tensor(_A ,generator=_A ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowerCAmelCase : Tuple = self.scheduler.schedule[t]
_lowerCAmelCase : Union[str, Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase, _lowerCAmelCase : Any = self.scheduler.add_noise_to_input(_A ,_A ,generator=_A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[str] = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase : Any = self.scheduler.step(_A ,_A ,_A ,_A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
_lowerCAmelCase : Any = self.scheduler.step_correct(
_A ,_A ,_A ,_A ,step_output.prev_sample ,step_output['derivative'] ,)
_lowerCAmelCase : List[Any] = step_output.prev_sample
_lowerCAmelCase : Any = (sample / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : str = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
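# Usage sketch (illustrative; the checkpoint id is an assumption, not taken
# from this file). Left uncalled because it downloads model weights at runtime.
def _demo_karras_ve():
    from diffusers import KarrasVePipeline

    pipe = KarrasVePipeline.from_pretrained('google/ncsnpp-celebahq-256')
    return pipe(num_inference_steps=50).images[0]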
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
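# Usage sketch (illustrative): the tokenizer pads on the left and appends the
# <sep>/<cls> pair built by build_inputs_with_special_tokens above.
#   from transformers import XLNetTokenizer
#   tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
#   input_ids = tokenizer('Hello world')['input_ids']  # [..., sep_id, cls_id]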
| 16 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=30 ,_A=2 ,_A=3 ,_A=True ,_A=True ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Optional[int] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : str = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = ViTMSNModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : str = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A ,labels=_A )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : str = 1
_lowerCAmelCase : Dict = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = ViTMSNModelTester(self )
_lowerCAmelCase : str = ConfigTester(self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A ,nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(_A )
_lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = ViTMSNModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_A )
_lowerCAmelCase : Any = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**_A )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : int = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_A ,atol=1E-4 ) )
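# Inference sketch mirroring the integration test above (uncalled helper; it
# downloads model weights at runtime):
def _demo_vit_msn_inference(image):
    processor = ViTImageProcessor.from_pretrained('facebook/vit-msn-small')
    model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small')
    inputs = processor(images=image, return_tensors='pt')
    return model(**inputs).logits  # shape (1, 1000) for this checkpoint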
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 0-ed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
                    ((tempa + tempa) % 0x1_0000_0000),  # i.e. temp1 + temp2, the two temporaries computed above
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
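For readability, here is a minimal, de-obfuscated sketch of the two helpers the class above relies on: the 32-bit right-rotate and the message padding step. The function names are illustrative, not taken from the file.

```python
import struct

def rotr(value: int, rotations: int) -> int:
    # rotate a 32-bit integer right by `rotations` bits
    return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)

def sha256_pad(data: bytes) -> bytes:
    # append 0x80, zero bytes, then the 64-bit big-endian bit length,
    # so the padded message is a multiple of 64 bytes
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)

assert rotr(0x1, 1) == 0x8000_0000
assert len(sha256_pad(b"Test String")) % 64 == 0
```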
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase ) as metadata_file:
_lowerCAmelCase : Union[str, Any] = json.load(_lowerCamelCase )
_lowerCAmelCase : List[str] = LukeConfig(use_entity_aware_attention=_lowerCamelCase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase : List[str] = torch.load(_lowerCamelCase , map_location='cpu' )
# Load the entity vocab file
_lowerCAmelCase : Dict = load_entity_vocab(_lowerCamelCase )
_lowerCAmelCase : Tuple = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase : Optional[Any] = AddedToken('<ent>' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = AddedToken('<ent2>' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = LukeTokenizer.from_pretrained(_lowerCamelCase )
# Initialize the embeddings of the special tokens
_lowerCAmelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight']
_lowerCAmelCase : Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
_lowerCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
_lowerCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase : Union[str, Any] = f"""encoder.layer.{layer_index}.attention.self."""
_lowerCAmelCase : Dict = state_dict[prefix + matrix_name]
_lowerCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
_lowerCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase : List[Any] = state_dict['entity_embeddings.entity_embeddings.weight']
_lowerCAmelCase : Tuple = entity_emb[entity_vocab['[MASK]']]
_lowerCAmelCase : Union[str, Any] = LukeModel(config=_lowerCamelCase ).eval()
_lowerCAmelCase, _lowerCAmelCase : Any = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
if not (len(_lowerCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {', '.join(_lowerCamelCase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
_lowerCAmelCase : Any = LukeTokenizer.from_pretrained(_lowerCamelCase , task='entity_classification' )
_lowerCAmelCase : Tuple = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
_lowerCAmelCase : Optional[Any] = (39, 42)
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , entity_spans=[span] , add_prefix_space=_lowerCamelCase , return_tensors='pt' )
_lowerCAmelCase : int = model(**_lowerCamelCase )
# Verify word hidden states
if model_size == "large":
_lowerCAmelCase : Optional[Any] = torch.Size((1, 42, 1024) )
_lowerCAmelCase : Tuple = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_lowerCAmelCase : str = torch.Size((1, 42, 768) )
_lowerCAmelCase : Optional[Any] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCAmelCase : Optional[Any] = torch.Size((1, 1, 1024) )
_lowerCAmelCase : List[Any] = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_lowerCAmelCase : Tuple = torch.Size((1, 1, 768) )
_lowerCAmelCase : Optional[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_lowerCamelCase ) )
model.save_pretrained(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = {}
with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_lowerCamelCase ):
_lowerCAmelCase, _lowerCAmelCase : List[str] = line.rstrip().split('\t' )
_lowerCAmelCase : Any = index
return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowerCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
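A self-contained illustration of what `load_entity_vocab` above does with the TSV file; the sample contents are invented.

```python
import io

def parse_entity_vocab(handle) -> dict:
    # each line is "<entity_title>\t<count>"; the line index becomes the entity id
    return {line.rstrip().split("\t")[0]: index for index, line in enumerate(handle)}

sample = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n")
assert parse_entity_vocab(sample) == {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2}
```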
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)  # identity scoring when no key is given
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
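A de-obfuscated sketch of the behaviour the heap class above implements: a min-heap ordered by `key(item)` (its `_cmp` uses `<`), here built on `heapq` and without the update/delete support of the original. All names are illustrative.

```python
import heapq

class KeyedHeap:
    """Min-heap over items, ordered by key(item)."""

    def __init__(self, key=None):
        self.key = key or (lambda item: item)
        self._heap = []

    def insert(self, item):
        heapq.heappush(self._heap, (self.key(item), item))

    def extract_top(self):
        score, item = heapq.heappop(self._heap)
        return [item, score]

heap = KeyedHeap(key=len)
for word in ["abc", "a", "ab"]:
    heap.insert(word)
assert heap.extract_top() == ["a", 1]
```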
| 16 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A=False ,_A=False ,_A=False ,_A=None ,_A=None ,_A=None ,_A=None ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : str = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b.'
                ' If you are testing the model, this can safely be ignored' )
_lowerCAmelCase : Optional[Any] = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCAmelCase : Optional[int] = '<|endoftext|>' if eos_token is None else eos_token
_lowerCAmelCase : Any = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCAmelCase : Tuple = unk_token if pad_token is None else pad_token
_lowerCAmelCase : Any = eos_token if bos_token is None else bos_token
else:
_lowerCAmelCase : List[str] = '<pad>' if pad_token is None else pad_token
_lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,pad_token=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : Tuple = do_lower_case
_lowerCAmelCase : List[str] = remove_space
_lowerCAmelCase : List[Any] = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCAmelCase : Optional[int] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCAmelCase : List[str] = re.compile(
F"""[{''.join(map(_A ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.__dict__.copy()
_lowerCAmelCase : List[Any] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.non_printing_characters_re.sub('' ,_A )
# Normalize whitespaces
_lowerCAmelCase : Union[str, Any] = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
_lowerCAmelCase : Dict = unicodedata.normalize('NFC' ,_A )
return text
def __lowerCamelCase ( self ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.preprocess_text(_A )
return self.sp_model.encode(_A ,out_type=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
return out_string
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Dict = ''
_lowerCAmelCase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(_A )
_lowerCAmelCase : int = False
out_string += self.sp_model.decode(_A )
return out_string
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[str] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __lowerCamelCase ( self ,_A ,_A = False ):
'''simple docstring'''
if isinstance(_A ,_A ):
_lowerCAmelCase : Tuple = self.preprocess_text(_A )
_lowerCAmelCase : Optional[int] = self.sp_model.encode(_A )
else:
_lowerCAmelCase : Optional[int] = [self.preprocess_text(_A ) for t in text]
_lowerCAmelCase : Optional[Any] = self.sp_model.encode(_A )
if return_tensors is True or return_tensors == "pt":
_lowerCAmelCase : Dict = torch.tensor(_A )
return token_ids
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.decode(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
_lowerCAmelCase : Any = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(_A ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=_A )
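A standalone sketch of the text preprocessing above: strip non-printing characters, collapse whitespace variants to a plain space, then NFC-normalize. The regex is a simplified stand-in for the one built in `__init__`.

```python
import re
import unicodedata

NON_PRINTING = re.compile(r"[\x00-\x08\x0b-\x1f\x7f-\x9f]")  # simplified range

def preprocess(text: str) -> str:
    text = NON_PRINTING.sub("", text)
    text = "".join(ch if not ch.isspace() else " " for ch in text)
    return unicodedata.normalize("NFC", text)

# \x07 is stripped, the thin space U+2009 collapses to " ", and NFC folds
# "e" + combining acute (U+0301) into "é"
assert preprocess("cafe\u0301\u2009ok\x07") == "café ok"
```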
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
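Shown in isolation, the additive causal mask registered in `__init__` above: future positions get a large negative bias that removes them from the softmax.

```python
import torch

num_tokens = 4
mask = torch.full((num_tokens, num_tokens), -10000.0)
mask.triu_(1)           # keep strictly-upper entries, zero the rest
mask = mask[None, ...]  # leading batch dimension, as registered above
assert mask[0, 0, 1].item() == -10000.0  # attending forward: blocked
assert mask[0, 1, 0].item() == 0.0       # attending backward: allowed
```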
| 16 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["pixel_values"]
def __init__( self ,_A = True ,_A = None ,_A = PIL.Image.BICUBIC ,_A = True ,_A = None ,_A = 1 / 255 ,_A = True ,_A = True ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : int = size if size is not None else {'height': 256, 'width': 256}
_lowerCAmelCase : List[Any] = get_size_dict(_A )
_lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase : int = get_size_dict(_A ,param_name='crop_size' )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : int = size
_lowerCAmelCase : str = resample
_lowerCAmelCase : Dict = do_center_crop
_lowerCAmelCase : Dict = crop_size
_lowerCAmelCase : Any = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : int = do_normalize
_lowerCAmelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCamelCase ( self ,_A ,_A ,_A = PIL.Image.BICUBIC ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
_A ,size=(size['height'], size['width']) ,resample=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A ,size=(size['height'], size['width']) ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = None ,**_A ,):
'''simple docstring'''
return rescale(_A ,scale=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,**_A ,):
'''simple docstring'''
return normalize(_A ,mean=_A ,std=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,_A=None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = ChannelDimension.FIRST ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = resample if resample is not None else self.resample
_lowerCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : List[str] = size if size is not None else self.size
_lowerCAmelCase : List[str] = get_size_dict(_A )
_lowerCAmelCase : int = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : str = get_size_dict(_A ,param_name='crop_size' )
_lowerCAmelCase : Dict = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(_A ) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=_A ,size=_A ,resample=_A ) for image in images]
if do_center_crop:
_lowerCAmelCase : List[Any] = [self.center_crop(image=_A ,size=_A ) for image in images]
if do_rescale:
_lowerCAmelCase : Optional[Any] = [self.rescale(image=_A ,scale=_A ) for image in images]
if do_normalize:
_lowerCAmelCase : Union[str, Any] = [self.normalize(image=_A ,mean=_A ,std=_A ) for image in images]
_lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(_A ,_A ) for image in images]
_lowerCAmelCase : Optional[int] = {'pixel_values': images}
return BatchFeature(data=_A ,tensor_type=_A )
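The per-pixel arithmetic behind the rescale and normalize steps above reduces to the following; the pixel values are invented, and 0.5 is the IMAGENET_STANDARD mean and std.

```python
import numpy as np

image = np.array([[[0.0, 128.0, 255.0]]])  # H x W x C, made-up pixels
rescaled = image * (1 / 255)               # do_rescale with the default factor
normalized = (rescaled - 0.5) / 0.5        # do_normalize with mean=std=0.5
assert np.isclose(normalized.min(), -1.0) and np.isclose(normalized.max(), 1.0)
```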
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(_A ,(str, bytes, type(None )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
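The integer-precision rule in `_tensorize` above, standalone: `jnp.array`'s default integer width depends on the `jax_enable_x64` flag, so the formatter pins the dtype explicitly.

```python
import jax
import jax.numpy as jnp
import numpy as np

value = np.arange(3, dtype=np.int64)
dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
arr = jnp.array(value, dtype=dtype)
assert arr.dtype == dtype
```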
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase = 1000 ):
'''simple docstring'''
_lowerCAmelCase : Dict = 3
_lowerCAmelCase : Optional[Any] = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # multiples of 15 already satisfy a % 3 == 0
            result += a
        a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
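The same sum can be computed without a loop via inclusion-exclusion over arithmetic series; a sketch, with the known answer for n = 1000 as a check.

```python
def sum_multiples(k: int, n: int) -> int:
    # sum of the multiples of k strictly below n: k * (1 + 2 + ... + m)
    m = (n - 1) // k
    return k * m * (m + 1) // 2

assert sum_multiples(3, 1000) + sum_multiples(5, 1000) - sum_multiples(15, 1000) == 233168
```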
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
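The static method above is spherical linear interpolation (slerp) between two tensors; note that the two interpolated arguments are distinct even though both render as `xa` above. A standalone version with an endpoint sanity check, using illustrative names.

```python
from math import acos, sin

import torch

def slerp(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)

a, b = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(slerp(a, b, 0.0), a)
assert torch.allclose(slerp(a, b, 1.0), b)
```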
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""ConvNextFeatureExtractor"""]
_lowerCAmelCase = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
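The guarded imports above follow the standard optional-dependency pattern: probe each backend and only expose the matching symbols. A minimal standalone version of the idea, reusing class names from the file above:

```python
import importlib.util

def backend_available(name: str) -> bool:
    # probe for an optional backend without importing it eagerly
    return importlib.util.find_spec(name) is not None

symbols = ["ConvNextConfig"]
if backend_available("torch"):
    symbols += ["ConvNextModel", "ConvNextForImageClassification"]
print(symbols)
```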
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
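The merge loop inside the `bpe` method above, reduced to a standalone sketch; the rank table and the word are invented.

```python
def get_adjacent_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}  # made-up merge table
word = ("l", "o", "w</w>")
while len(word) > 1:
    pairs = get_adjacent_pairs(word)
    bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
    if bigram not in ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            merged.append(first + second)  # apply the highest-ranked merge
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
assert word == ("low</w>",)
```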
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
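        # outer product of the query and key padding masks -> (batch, q_len, kv_len); unsqueeze(-3) adds a broadcastable heads dimension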
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
        # FiLM cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
        # Apply FiLM conditional feed-forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
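        # T5-style layer norm (RMSNorm): scale by the root mean square only; no mean subtraction and no bias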
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
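        # tanh approximation of GELU, as used in Google BERT / GPT-2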
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
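        # FiLM: predict a per-channel scale and shift from the conditioning embedding, then apply them to x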
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=3 ,_A=True ,_A=True ,_A=0.1 ,_A=0.1 ,_A=224 ,_A=1000 ,_A=[3, 3, 6, 4] ,_A=[48, 56, 112, 220] ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Optional[int] = layer_depths
_lowerCAmelCase : str = embed_dims
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths ,embed_dims=self.embed_dims ,mlp_ratio=4 ,downsamples=[True, True, True, True] ,hidden_act='gelu' ,num_labels=self.num_labels ,down_patch_size=3 ,down_stride=2 ,down_pad=1 ,drop_rate=0.0 ,drop_path_rate=0.0 ,use_layer_scale=_A ,layer_scale_init_value=1E-5 ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = SwiftFormerModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : List[Any] = SwiftFormerForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
_lowerCAmelCase : Optional[int] = SwiftFormerForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Tuple = model(_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
((_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase)) : Any = self.prepare_config_and_inputs()
_lowerCAmelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = SwiftFormerModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(
self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 ,num_attention_heads=12 ,num_hidden_layers=12 ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(_A )
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A ,nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = SwiftFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : Tuple = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.hidden_states
_lowerCAmelCase : List[str] = 8
self.assertEqual(len(_A ) ,_A ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_A ) ):
self.assertEqual(
hidden_states[i].shape ,torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) ,)
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def _config_zero_init(_A ):
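            # recursively shrink every *_range / *_std / initializer_factor / layer_scale value to ~0 (including sub-configs) so initialization can be checked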
_lowerCAmelCase : Optional[Any] = copy.deepcopy(_A )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_A ,_A ,1E-10 )
if isinstance(getattr(_A ,_A ,_A ) ,_A ):
_lowerCAmelCase : Tuple = _config_zero_init(getattr(_A ,_A ) )
setattr(_A ,_A ,_A )
return configs_no_init
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = _config_zero_init(_A )
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(config=_A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(_A )
_lowerCAmelCase : List[Any] = self.default_image_processor
_lowerCAmelCase : str = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : str = model(**_A )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_A ,atol=1E-4 ) )
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
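    # build a Vandermonde system from the points (1, y_1), ..., (n, y_n) and solve for the polynomial coefficients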
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
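    # for each optimum polynomial, locate its first incorrect term (FIT) and add it to the running total (cf. Project Euler 101)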
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
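        # keep the fast tokenizer's serialized normalizer in sync with the requested lowercasing / accent-stripping / Chinese-character handling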
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF) # CJK Unified Ideographs
        or (cp >= 0X3400 and cp <= 0X4DBF) # CJK Unified Ideographs Extension A
        or (cp >= 0X20000 and cp <= 0X2A6DF) # CJK Unified Ideographs Extension B
        or (cp >= 0X2A700 and cp <= 0X2B73F) # CJK Unified Ideographs Extension C
        or (cp >= 0X2B740 and cp <= 0X2B81F) # CJK Unified Ideographs Extension D
        or (cp >= 0X2B820 and cp <= 0X2CEAF) # CJK Unified Ideographs Extension E
        or (cp >= 0XF900 and cp <= 0XFAFF) # CJK Compatibility Ideographs
        or (cp >= 0X2F800 and cp <= 0X2FA1F) # CJK Compatibility Ideographs Supplement
    ): # the codepoint lies in a CJK ideograph block
        return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
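    # scan left to right: at each Chinese character, greedily match the longest LTP-segmented word and prefix its continuation subwords with "##"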
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
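    # run LTP word segmentation and BERT tokenization in batches of 100 lines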
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save positions of Chinese subwords that start with "##", i.e. those that continue a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCAmelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase : Any = get_sagemaker_input()
else:
_lowerCAmelCase : Optional[Any] = get_cluster_input()
return config
def lowerCamelCase__ ( _lowerCamelCase=None ):
'''simple docstring'''
if subparsers is not None:
_lowerCAmelCase : List[Any] = subparsers.add_parser('config' , description=_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser('Accelerate config command' , description=_lowerCamelCase )
parser.add_argument(
'--config_file' , default=_lowerCamelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = get_user_input()
if args.config_file is not None:
_lowerCAmelCase : str = args.config_file
else:
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : int = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_lowerCamelCase )
else:
config.to_yaml_file(_lowerCamelCase )
print(f"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = config_command_parser()
_lowerCAmelCase : Dict = parser.parse_args()
config_command(_lowerCamelCase )
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
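# A hedged, self-contained sketch of the reproducible-latents pattern used by
# the get_inputs helpers above: draw once from a seeded NumPy RNG, then move
# the array onto the target device/dtype. make_latents is an illustrative
# name, not part of the test suite.
import numpy as np
import torch

def make_latents(seed, shape=(1, 4, 32, 32), device="cpu", dtype=torch.float32):
    latents = np.random.RandomState(seed).standard_normal(shape)  # deterministic draw
    return torch.from_numpy(latents).to(device=device, dtype=dtype)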
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PegasusTokenizer
_UpperCAmelCase = PegasusTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : str = PegasusTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCamelCase ( self ):
'''simple docstring'''
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'</s>' )
self.assertEqual(vocab_keys[-1] ,'v' )
self.assertEqual(len(_A ) ,1103 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase : int = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
_lowerCAmelCase : str = rust_tokenizer([raw_input_str] ,return_tensors=_A ,add_special_tokens=_A ).input_ids[0]
_lowerCAmelCase : List[Any] = py_tokenizer([raw_input_str] ,return_tensors=_A ,add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowerCAmelCase : Optional[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
_lowerCAmelCase : str = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_lowerCAmelCase : Union[str, Any] = tokenizer([raw_input_str] ,return_tensors=_A ).input_ids[0]
self.assertListEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_lowerCAmelCase : Dict = 'To ensure a smooth flow of bank resolutions.'
_lowerCAmelCase : Dict = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_lowerCAmelCase : str = tokenizer([raw_input_str] ,return_tensors=_A ).input_ids[0]
self.assertListEqual(_A ,_A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['This is going to be way too long.' * 150, 'short example']
_lowerCAmelCase : List[Any] = ['not super long but more than 5 tokens', 'tiny']
_lowerCAmelCase : List[Any] = self._large_tokenizer(_A ,padding=_A ,truncation=_A ,return_tensors='pt' )
_lowerCAmelCase : str = self._large_tokenizer(
text_target=_A ,max_length=5 ,padding=_A ,truncation=_A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PegasusTokenizer
_UpperCAmelCase = PegasusTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Optional[Any] = PegasusTokenizer(_A ,offset=0 ,mask_token_sent=_A ,mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
_lowerCAmelCase : List[str] = rust_tokenizer([raw_input_str] ,return_tensors=_A ,add_special_tokens=_A ).input_ids[0]
_lowerCAmelCase : Tuple = py_tokenizer([raw_input_str] ,return_tensors=_A ,add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A ,_A )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ['This is going to be way too long.' * 1000, 'short example']
_lowerCAmelCase : Dict = ['not super long but more than 5 tokens', 'tiny']
_lowerCAmelCase : List[Any] = self._large_tokenizer(_A ,padding=_A ,truncation=_A ,return_tensors='pt' )
_lowerCAmelCase : int = self._large_tokenizer(
text_target=_A ,max_length=5 ,padding=_A ,truncation=_A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
_lowerCAmelCase : int = self._large_tokenizer(_A ).input_ids
self.assertListEqual(
_A ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
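# A hedged sketch of the seq2seq batching pattern the @require_torch tests in
# this file exercise; `text_target=` is the modern transformers spelling for
# tokenizing labels, and make_seq2seq_batch plus the length cap are
# illustrative, not part of the test suite.
def make_seq2seq_batch(tokenizer, sources, targets, max_target_length=5):
    batch = tokenizer(sources, padding=True, truncation=True, return_tensors="pt")
    labels = tokenizer(text_target=targets, max_length=max_target_length,
                       padding=True, truncation=True, return_tensors="pt")
    batch["labels"] = labels["input_ids"]  # decoder-side ids become the labels
    return batch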
| 16 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
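# The de-obfuscated standard-library calls behind the two helpers above are
# base64.b85encode / base64.b85decode; the readable names below are assumptions.
import base64

def base85_encode(text: str) -> bytes:
    return base64.b85encode(text.encode("utf-8"))

def base85_decode(data: bytes) -> str:
    return base64.b85decode(data).decode("utf-8")

assert base85_decode(base85_encode("hello")) == "hello"  # round trip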
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
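# A hedged sketch of the half-precision loading pattern the last test above
# uses (torch.floataa there reads as an obfuscated torch.float16); the
# repository id comes from the test, load_fp16_pipeline is an illustrative name.
def load_fp16_pipeline(device="cuda"):
    pipe = DanceDiffusionPipeline.from_pretrained(
        "harmonai/maestro-150k", torch_dtype=torch.float16)
    return pipe.to(device)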
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
    def __lowerCamelCase ( self ,token_ids_a ,token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __lowerCamelCase ( self ,token_ids_a ,token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def __lowerCamelCase ( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
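# A standalone sketch of the special-token layout the first two methods above
# build: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] with token
# type ids 0/1 for a pair. 101/102 are the usual bert-base [CLS]/[SEP] ids.
def bert_pair_layout(ids_a, ids_b=None, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id]
    token_type_ids = [0] * len(input_ids)
    if ids_b is not None:
        input_ids += ids_b + [sep_id]
        token_type_ids += [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

assert bert_pair_layout([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])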
| 16 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
_lowerCAmelCase : Optional[int] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase : int = {'unk_token': '[UNK]'}
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'lower newer'
_lowerCAmelCase : Dict = 'lower newer'
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = 'lower newer'
_lowerCAmelCase : Dict = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(_A )
self.assertListEqual(_A ,_A )
_lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = tokenizer('Hello' ,'World' )
_lowerCAmelCase : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowerCAmelCase : Optional[Any] = tokenizer.encode('sequence builders' ,add_special_tokens=_A )
_lowerCAmelCase : Optional[Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=_A )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(
'sequence builders' ,add_special_tokens=_A ,add_prefix_space=_A )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(
'sequence builders' ,'multi-sequence build' ,add_special_tokens=_A ,add_prefix_space=_A )
_lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(_A )
_lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(_A ,_A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_lowerCAmelCase : Dict = tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowerCAmelCase : Tuple = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
_lowerCAmelCase : List[str] = tokenizer(_A ,padding=_A )
_lowerCAmelCase : Dict = [tokenizer.decode(_A ,skip_special_tokens=_A ) for seq in encoding['input_ids']]
# fmt: off
_lowerCAmelCase : Tuple = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_lowerCAmelCase : List[str] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data ,_A )
for expected, decoded in zip(_A ,_A ):
self.assertEqual(_A ,_A )
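# A hedged sketch of the fixture-writing pattern used in setUp above: a toy
# byte-level BPE tokenizer is defined by a vocab.json (token -> id) plus a
# merges.txt; write_toy_bpe is an illustrative helper, not part of the suite.
import json
import os
import tempfile

def write_toy_bpe(tokens, merges):
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
        json.dump({tok: i for i, tok in enumerate(tokens)}, fp)
    with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
        fp.write("\n".join(["#version: 0.2", *merges]))
    return tmpdir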
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
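# A single-process illustration of the contract the checks above assert for
# pad_across_processes: each rank's tensor is zero-padded along dim 0 to a
# common length, at the end by default or at the front with pad_first=True.
# pad_like_accelerate is an illustrative helper, not an accelerate API.
def pad_like_accelerate(tensor, target_len, pad_first=False):
    pad = torch.zeros(target_len - tensor.shape[0], *tensor.shape[1:],
                      dtype=tensor.dtype, device=tensor.device)
    return torch.cat((pad, tensor) if pad_first else (tensor, pad), dim=0)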
| 16 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=32 ,_A=3 ,_A=4 ,_A=[10, 20, 30, 40] ,_A=[2, 2, 3, 2] ,_A=True ,_A=True ,_A=37 ,_A="gelu" ,_A=10 ,_A=0.0_2 ,_A=["stage2", "stage3", "stage4"] ,_A=[2, 3, 4] ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Any = num_stages
_lowerCAmelCase : List[str] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : List[Any] = out_features
_lowerCAmelCase : Union[str, Any] = out_indices
_lowerCAmelCase : int = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_A ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = ConvNextModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = ConvNextForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[str] = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Optional[Any] = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = config_and_inputs
_lowerCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ConvNextModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(_A )
_lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = ConvNextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(_A )
_lowerCAmelCase : Union[str, Any] = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Union[str, Any] = image_processor(images=_A ,return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : str = model(**_A )
# verify the logits
_lowerCAmelCase : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : List[Any] = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_A ,atol=1E-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , a__ ):
_UpperCAmelCase = (ConvNextBackbone,) if is_torch_available() else ()
_UpperCAmelCase = ConvNextConfig
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ConvNextModelTester(self )
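# A hedged sketch of the single-image inference path the integration test above
# follows (imports come from the conditional blocks at the top of this file);
# the checkpoint id is the one the test loads, run_classifier is illustrative.
def run_classifier(image, checkpoint="facebook/convnext-tiny-224", device="cpu"):
    processor = AutoImageProcessor.from_pretrained(checkpoint)
    model = ConvNextForImageClassification.from_pretrained(checkpoint).to(device)
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1).item()  # index of the predicted ImageNet class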
| 16 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations( sequence ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence , current_sequence , index , index_used ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
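# For cross-checking the backtracking generator above, the standard library
# yields the same orderings (as tuples rather than lists):
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings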
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
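# A minimal sketch of the same deferred-import idea using PEP 562 module
# __getattr__, which is what _LazyModule implements with extra bookkeeping;
# the attribute-to-submodule mapping below is illustrative only.
import importlib

_LAZY_ATTRS = {"GitModel": ".modeling_git", "GitForCausalLM": ".modeling_git"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")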
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        # main_process_only defaults to True and in_order to False, as in accelerate.logging
        main_process_only = kwargs.pop('main_process_only' ,True )
        in_order = kwargs.pop('in_order' ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def lowerCamelCase__ ( name , log_level = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
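# A pure-stdlib analogue of the rank gating above: a logging.Filter that lets
# records through only on the main process, with the rank read from the RANK
# environment variable (an assumption; accelerate derives it from PartialState).
class _MainProcessFilter(logging.Filter):
    def filter(self, record):
        return os.environ.get("RANK", "0") == "0"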
| 16 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCAmelCase = logging.getLogger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self ,_A=-1 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = label_idx
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if isinstance(_A ,_A ):
_lowerCAmelCase : Any = mode.value
_lowerCAmelCase : Optional[Any] = os.path.join(_A ,F"""{mode}.txt""" )
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = []
with open(_A ,encoding='utf-8' ) as f:
_lowerCAmelCase : Any = []
_lowerCAmelCase : Any = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A ) )
guid_index += 1
_lowerCAmelCase : str = []
_lowerCAmelCase : int = []
else:
_lowerCAmelCase : int = line.split(' ' )
words.append(splits[0] )
if len(_A ) > 1:
labels.append(splits[self.label_idx].replace('\n' ,'' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A ) )
return examples
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(_A )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCAmelCase : int = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(_A )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' ,line.split()[0] )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if path:
with open(_A ,'r' ) as f:
_lowerCAmelCase : Any = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( a__ ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if path:
with open(_A ,'r' ) as f:
_lowerCAmelCase : Union[str, Any] = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Any = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( a__ ):
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if isinstance(_A ,_A ):
_lowerCAmelCase : Dict = mode.value
_lowerCAmelCase : Dict = os.path.join(_A ,F"""{mode}.txt""" )
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : str = []
with open(_A ,encoding='utf-8' ) as f:
for sentence in parse_incr(_A ):
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(_A ) == len(_A )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A ) )
guid_index += 1
return examples
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
for sentence in parse_incr(_A ):
_lowerCAmelCase : str = preds_list[example_id]
_lowerCAmelCase : Tuple = ''
for token in sentence:
out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(_A )
example_id += 1
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if path:
with open(_A ,'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
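# Illustrative usage sketch: the three classes above correspond to the NER, chunking and
# POS variants of `TokenClassificationTask` from the transformers token-classification
# example. A minimal, hedged sketch using the upstream method names (`read_examples_from_file`,
# `get_labels`); the `NER` class name, data directory, and `Split` enum are assumptions
# taken from `utils_ner`:
#
#   task = NER()  # the first class above, reading the label from the last column
#   examples = task.read_examples_from_file('data_dir', Split.train)
#   labels = task.get_labels(None)  # falls back to the built-in default label set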
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
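# Illustrative usage sketch: a standalone version of what the slow test above checks,
# using the same checkpoint the test loads ('harmonai/maestro-150k'):
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#
#   pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
#   generator = torch.manual_seed(0)
#   output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
#   audio = output.audios[0]  # array of shape (channels, samples)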
| 16 | 1 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase__ ( _lowerCamelCase=32 , _lowerCamelCase=10 , _lowerCamelCase=100 , _lowerCamelCase=1026 , _lowerCamelCase=True , _lowerCamelCase="data/tokenized_stories_train_wikitext103.jbl" , _lowerCamelCase="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
_lowerCAmelCase, _lowerCAmelCase : int = generate_datasets(
_lowerCamelCase , _lowerCamelCase , number=_lowerCamelCase , min_len=1026 , trim=_lowerCamelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowerCAmelCase : int = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
_lowerCAmelCase : Dict = load_gpta('gpt2' ).to(_lowerCamelCase )
print('computing perplexity on objective set' )
_lowerCAmelCase : str = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).item()
print('perplexity on objective set:' , _lowerCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=15 , _lowerCamelCase=128 , _lowerCamelCase=100 , _lowerCamelCase="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
_lowerCAmelCase : int = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
_lowerCAmelCase : Optional[int] = SecondaryLearner(_lowerCamelCase )
# Train secondary learner
_lowerCAmelCase : int = train_secondary_learner(
_lowerCamelCase , _lowerCamelCase , max_epochs=_lowerCamelCase , batch_size=_lowerCamelCase , eval_freq=100 , igf_model_path=_lowerCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=32 , _lowerCamelCase=1000 , _lowerCamelCase=16 , _lowerCamelCase=1.0 , _lowerCamelCase=recopy_gpta , _lowerCamelCase=None , _lowerCamelCase=10 , _lowerCamelCase="gpt2_finetuned.pt" , ):
'''simple docstring'''
_lowerCAmelCase : str = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
_lowerCAmelCase : Tuple = RandomSampler(_lowerCamelCase )
_lowerCAmelCase : Tuple = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase )
_lowerCAmelCase : List[str] = max_steps // (len(_lowerCamelCase )) + 1
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCamelCase )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = recopy_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_lowerCamelCase )
secondary_learner.eval()
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = []
# Compute the performance of the transformer model at the beginning
_lowerCAmelCase : Any = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
test_perps.append(_lowerCamelCase )
print('Test perplexity, step' , _lowerCamelCase , ':' , _lowerCamelCase )
for epoch in range(int(_lowerCamelCase ) ):
for step, example in enumerate(_lowerCamelCase ):
torch.cuda.empty_cache()
_lowerCAmelCase : List[str] = random.randint(0 , example.size(2 ) - context_len - 1 )
_lowerCAmelCase : List[Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowerCAmelCase : Optional[int] = model(_lowerCamelCase , labels=_lowerCamelCase )
_lowerCAmelCase : List[str] = True
if secondary_learner is not None:
_lowerCAmelCase : Dict = secondary_learner.forward(
torch.tensor(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_lowerCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowerCAmelCase : Union[str, Any] = -1
if predicted_q < threshold:
_lowerCAmelCase : List[Any] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowerCAmelCase : Optional[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowerCAmelCase : str = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowerCAmelCase : Tuple = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
test_perps.append(_lowerCamelCase )
print('Test perplexity, step' , _lowerCamelCase , ':' , _lowerCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _lowerCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_lowerCamelCase , default=_lowerCamelCase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_lowerCamelCase , default=_lowerCamelCase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_lowerCamelCase , type=_lowerCamelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_lowerCamelCase , default=_lowerCamelCase , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_lowerCamelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_lowerCamelCase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_lowerCamelCase , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1000 , type=_lowerCamelCase , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_lowerCamelCase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_lowerCamelCase , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_lowerCamelCase , help=(
            'decay the selectivity of our secondary learner filter from '
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_lowerCamelCase , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1026 , type=_lowerCamelCase , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_lowerCamelCase , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_lowerCamelCase , type=_lowerCamelCase , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_lowerCamelCase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_lowerCamelCase , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_lowerCamelCase , type=_lowerCamelCase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_lowerCamelCase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
_lowerCAmelCase : Optional[int] = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
_lowerCAmelCase : str = training_secondary_learner(
_lowerCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
_lowerCAmelCase : Any = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowerCAmelCase, _lowerCAmelCase : Any = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=_lowerCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_lowerCamelCase , secondary_learner=_lowerCamelCase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
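# Illustrative invocation sketch: the script defines the argparse flags above but then
# runs the IGF stages with hard-coded arguments, so the flags below only document the
# intended interface (the script name and paths are assumptions):
#
#   python run_clm_igf.py \
#       --data_dir ./data \
#       --model_name_or_path gpt2 \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl \
#       --output_dir ./igf_output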
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
        self.assertEqual(output_a.shape ,output_b.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
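# Illustrative usage sketch: outside of these tests, UniPCMultistepScheduler is usually
# swapped into a pipeline with `from_config`, the same mechanism the cross-scheduler test
# above exercises (the checkpoint name is an assumed example):
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
#   image = pipe('an astronaut riding a horse', num_inference_steps=25).images[0]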
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
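# Illustrative usage sketch: a standalone version of the flow the slow/nightly tests
# above verify, using the same checkpoint they load:
#
#   from diffusers import LDMTextToImagePipeline
#
#   pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256')
#   image = pipe(
#       'A painting of a squirrel eating a burger',
#       num_inference_steps=50,
#       guidance_scale=6.0,
#   ).images[0]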
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
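# Illustrative invocation sketch, mirroring the argparse defaults above (the script
# file name is an assumption):
#
#   python convert_jukebox.py \
#       --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted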
| 16 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
_lowerCAmelCase = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Any = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_lowerCAmelCase : Union[str, Any] = bs[:]
_lowerCAmelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
_lowerCAmelCase : str = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = set()
_lowerCAmelCase : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Optional[Any] = char
return pairs
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A ,_A="replace" ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A=False ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else bos_token
_lowerCAmelCase : Dict = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else eos_token
_lowerCAmelCase : Union[str, Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else sep_token
_lowerCAmelCase : Any = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else cls_token
_lowerCAmelCase : Tuple = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else unk_token
_lowerCAmelCase : Optional[int] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase : str = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
super().__init__(
errors=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,add_prefix_space=_A ,**_A ,)
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Tuple = json.load(_A )
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[Any] = errors # how to handle errors in decoding
_lowerCAmelCase : Optional[int] = bytes_to_unicode()
_lowerCAmelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[1:-1]
_lowerCAmelCase : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Optional[int] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Any = {}
_lowerCAmelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : int = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : int = tuple(_A )
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : Dict = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : List[str] = bigram
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Tuple = 0
while i < len(_A ):
try:
_lowerCAmelCase : int = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : int = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : Tuple = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Tuple = ' '.join(_A )
_lowerCAmelCase : List[Any] = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for token in re.findall(self.pat ,_A ):
_lowerCAmelCase : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(' ' ) )
return bpe_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.decoder.get(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = ''.join(_A )
_lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Any = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : List[str] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : Optional[int] = 0
with open(_A ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Union[str, Any] = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : List[Any] = [self.cls_token_id]
_lowerCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self ,_A ,_A=False ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
_lowerCAmelCase : Union[str, Any] = ' ' + text
return (text, kwargs)
def __lowerCamelCase ( self ,_A ,_A = None ,_A = PaddingStrategy.DO_NOT_PAD ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Any = super()._pad(
encoded_inputs=_A ,max_length=_A ,padding_strategy=_A ,pad_to_multiple_of=_A ,return_attention_mask=_A ,)
# Load from model defaults
if return_attention_mask is None:
_lowerCAmelCase : Union[str, Any] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCAmelCase : Optional[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCAmelCase : Any = len(encoded_inputs['global_attention_mask'] ) != len(_A )
if needs_to_be_padded:
_lowerCAmelCase : Optional[Any] = len(_A ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCAmelCase : Optional[Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCAmelCase : List[Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
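# Hedged standalone sketch (not part of the original file): the `_pad` override
# above extends `global_attention_mask` with -1 rather than 0, because in this
# tokenizer's convention 0 means "local attention" and 1 means "global attention",
# so padded positions need a third value. The helper name below is an assumption
# used for illustration only:
def _pad_global_attention_mask(mask, target_length, padding_side="right"):
    """Pad a global attention mask with -1 up to target_length."""
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert _pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask([1, 0], 4, "left") == [-1, -1, 1, 0]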
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
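# Hedged standalone sketch (not part of the original file): the helper above is
# meant to slice the JSON blob out of an Instagram profile <script> tag and read
# the user record from it. A minimal working version of the same slicing idea,
# with a synthetic sample string:
def _extract_profile_json(script_text):
    start = script_text.find('{"config"')
    return json.loads(script_text[start:-1])  # drop the trailing ';'

_sample = 'window._sharedData = {"config": {}, "entry_data": {}};'
assert "entry_data" in _extract_profile_json(_sample)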
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' ,_A ,)
super().__init__(*_A ,**_A )
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
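# Hedged standalone sketch (not part of the original file): the preprocessing
# logic above, reimplemented for illustration — collapse whitespace, normalize
# LaTeX-style quotes, optionally strip accents via NFKD, optionally lower-case.
# The function name is an assumption, not part of the tokenizer's API:
def _preprocess_text(inputs, remove_space=True, keep_accents=False, do_lower_case=False):
    outputs = " ".join(inputs.strip().split()) if remove_space else inputs
    outputs = outputs.replace("``", '"').replace("''", '"')
    if not keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join(c for c in outputs if not unicodedata.combining(c))
    return outputs.lower() if do_lower_case else outputs

assert _preprocess_text("  Caf\u00e9  ``quoted''  ") == 'Cafe "quoted"'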
| 16 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=7 ,_A=3 ,_A=30 ,_A=400 ,_A=True ,_A=None ,_A=0.9 ,_A=None ,_A=True ,_A=[0.5, 0.5, 0.5] ,_A=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 30}
_lowerCAmelCase : Any = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : List[str] = max_resolution
_lowerCAmelCase : Any = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : List[Any] = crop_pct
_lowerCAmelCase : Optional[Any] = crop_size
_lowerCAmelCase : Optional[Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean
_lowerCAmelCase : Optional[int] = image_std
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = PoolFormerImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A ,'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_A ,'size' ) )
self.assertTrue(hasattr(_A ,'crop_pct' ) )
self.assertTrue(hasattr(_A ,'do_normalize' ) )
self.assertTrue(hasattr(_A ,'image_mean' ) )
self.assertTrue(hasattr(_A ,'image_std' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size ,{'height': 30, 'width': 30} )
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A ,Image.Image )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : Optional[Any] = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : Any = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,torch.Tensor )
# Test not batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_lowerCAmelCase : List[Any] = image_processing(_A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 0-ed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
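# Hedged illustration (not part of the original file): the static preprocessing
# method above implements the SHA-256 padding rule (FIPS 180-4) — append 0x80,
# then zero bytes, then the 64-bit big-endian bit length, so the padded message
# is a multiple of 64 bytes. The helper name below is an assumption:
def _sha256_pad(data):
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)

assert len(_sha256_pad(b"")) == 64
assert len(_sha256_pad(b"a" * 56)) == 128  # 56 content bytes force a second block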
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(_A ).hash ,hashlib.sha256(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
import base64
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return base64.a85encode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
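# Hedged usage sketch (not part of the original file), assuming the two wrappers
# above are Ascii85 encode/decode helpers over the standard-library base64 module
# (the mangled names `aaaencode`/`aaadecode` were restored to a85encode/a85decode):
_encoded = base64.a85encode("Hello World".encode("utf-8"))
assert base64.a85decode(_encoded).decode("utf-8") == "Hello World"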
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
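# Hedged illustration (not part of the original file): the class above is a keyed
# min-heap with a position map for O(log n) update/delete; since the dataset's
# renaming collapsed its method names into one, the same ordering behaviour (see
# `get_top` / `delete_item` used above) is sketched with the standard library:
import heapq

_heap = [(3, "b"), (1, "a"), (2, "c")]  # (key, item) pairs, ordered by key
heapq.heapify(_heap)
assert heapq.heappop(_heap) == (1, "a")  # smallest key comes out first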
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __UpperCamelCase :
_UpperCAmelCase = LEDConfig
_UpperCAmelCase = {}
_UpperCAmelCase = "gelu"
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=False ,_A=99 ,_A=32 ,_A=2 ,_A=4 ,_A=37 ,_A=0.1 ,_A=0.1 ,_A=20 ,_A=2 ,_A=1 ,_A=0 ,_A=4 ,):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : List[str] = eos_token_id
_lowerCAmelCase : Union[str, Any] = pad_token_id
_lowerCAmelCase : List[Any] = bos_token_id
_lowerCAmelCase : Dict = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_lowerCAmelCase : Tuple = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_lowerCAmelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
_lowerCAmelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
_lowerCAmelCase : Any = tf.concat([input_ids, eos_tensor] ,axis=1 )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : Any = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,attention_window=self.attention_window ,**self.config_updates ,)
_lowerCAmelCase : Tuple = prepare_led_inputs_dict(_A ,_A ,_A )
_lowerCAmelCase : Dict = tf.concat(
[tf.zeros_like(_A )[:, :-1], tf.ones_like(_A )[:, -1:]] ,axis=-1 ,)
_lowerCAmelCase : Any = global_attention_mask
return config, inputs_dict
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLEDModel(config=_A ).get_decoder()
_lowerCAmelCase : Tuple = inputs_dict['input_ids']
_lowerCAmelCase : Union[str, Any] = input_ids[:1, :]
_lowerCAmelCase : int = inputs_dict['attention_mask'][:1, :]
_lowerCAmelCase : Dict = 1
# first forward pass
_lowerCAmelCase : str = model(_A ,attention_mask=_A ,use_cache=_A )
_lowerCAmelCase, _lowerCAmelCase : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        _lowerCAmelCase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )
# append to next input_ids and
_lowerCAmelCase : str = tf.concat([input_ids, next_tokens] ,axis=-1 )
_lowerCAmelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
_lowerCAmelCase : Optional[int] = model(_A ,attention_mask=_A )[0]
_lowerCAmelCase : int = model(_A ,attention_mask=_A ,past_key_values=_A )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
_lowerCAmelCase : Tuple = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
_lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_A ,_A ,rtol=1E-3 )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
        _lowerCAmelCase : Optional[Any] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_lowerCAmelCase : List[str] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
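# Hedged illustration (not part of the original file): the helper above derives
# attention masks by marking every non-pad token with 1. The same rule in NumPy,
# assuming the tester's default pad_token_id of 1:
import numpy as np

_pad_token_id = 1
_input_ids = np.array([[5, 7, 1, 1]])
_attention_mask = (_input_ids != _pad_token_id).astype(np.int8)
assert _attention_mask.tolist() == [[1, 1, 0, 0]]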
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = TFLEDModelTester(self )
_lowerCAmelCase : Optional[Any] = ConfigTester(self ,config_class=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Tuple = tf.zeros_like(inputs_dict['attention_mask'] )
_lowerCAmelCase : Tuple = 2
_lowerCAmelCase : Tuple = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,1 ,inputs_dict['global_attention_mask'] ,)
_lowerCAmelCase : int = True
_lowerCAmelCase : int = self.model_tester.seq_length
_lowerCAmelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_A ):
_lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
def check_encoder_attentions_output(_A ):
_lowerCAmelCase : List[Any] = [t.numpy() for t in outputs.encoder_attentions]
_lowerCAmelCase : Dict = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,)
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,)
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Any = model_class(_A )
_lowerCAmelCase : Union[str, Any] = model(self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : List[str] = len(_A )
self.assertEqual(config.output_hidden_states ,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : List[str] = model(self._prepare_for_class(_A ,_A ) )
self.assertEqual(config.output_hidden_states ,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Optional[Any] = model_class(_A )
_lowerCAmelCase : Any = model(self._prepare_for_class(_A ,_A ) )
self.assertEqual(config.output_hidden_states ,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
_lowerCAmelCase : Any = True
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : Union[str, Any] = model_class(_A )
_lowerCAmelCase : Optional[Any] = model(self._prepare_for_class(_A ,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(_A ) )
self.assertEqual(model.config.output_hidden_states ,_A )
check_encoder_attentions_output(_A )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return tf.constant(_lowerCamelCase , dtype=tf.int32 )
_lowerCAmelCase = 1E-4
@slow
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_lowerCAmelCase : Dict = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase : Dict = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase : Union[str, Any] = prepare_led_inputs_dict(model.config ,_A ,_A )
_lowerCAmelCase : Union[str, Any] = model(**_A )[0]
_lowerCAmelCase : Optional[Any] = (1, 1024, 768)
self.assertEqual(output.shape ,_A )
# change to expected output here
_lowerCAmelCase : Tuple = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,_A ,atol=1E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_lowerCAmelCase : List[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase : Any = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_lowerCAmelCase : List[str] = prepare_led_inputs_dict(model.config ,_A ,_A )
_lowerCAmelCase : List[str] = model(**_A )[0]
_lowerCAmelCase : List[Any] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape ,_A )
# change to expected output here
_lowerCAmelCase : Dict = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] ,)
tf.debugging.assert_near(output[:, :3, :3] ,_A ,atol=1E-3 ,rtol=1E-3 )
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
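# Hedged illustration (not part of the original file): the causal mask registered
# in __init__ above is additive — after `triu_(1)`, entries strictly above the
# diagonal stay at -10000.0 (suppressed before softmax) and the rest become 0:
_mask = torch.full([4, 4], -1_0_0_0_0.0)
_mask.triu_(1)
assert _mask[0, 0].item() == 0.0 and _mask[0, 1].item() == -1_0_0_0_0.0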
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
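# Hedged standalone sketch (not part of the original file): the function above
# rewrites OpenAI Jukebox checkpoint keys into transformers-style names; this is
# a minimal reimplementation of just its Sequential-index-to-named-conv rule,
# with an assumed helper name:
def _rename_conv_suffix(key):
    for old, new in ((".model.1.", ".conv1d_1."), (".model.3.", ".conv1d_2.")):
        for suffix in ("bias", "weight"):
            if key.endswith(old + suffix) and len(key.split(".")) > 10:
                return key.replace(old + suffix, new + suffix)
    return key

_key = "encoders.0.level_blocks.0.model.0.model.0.model.1.bias"
assert _rename_conv_suffix(_key).endswith(".conv1d_1.bias")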
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
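# Hedged illustration (not part of the original script): a self-contained sketch of
# the regex renaming above, applied to one hypothetical raw checkpoint key. The key
# and the resulting index arithmetic are assumptions chosen to exercise the
# encoder conv_in pattern.
def _demo_rename_encoder_conv_in():
    import re

    pattern = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    raw_key = 'encoders.0.level_blocks.1.model.4.1.weight'  # hypothetical key
    groups = pattern.fullmatch(raw_key).groups()
    block_index = int(groups[2]) * 2 + int(groups[3])  # 4 * 2 + 1 = 9
    return f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""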
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _lowerCAmelCase : List[str] = {'dtype': jnp.int64}
            else:
                _lowerCAmelCase : Tuple = {'dtype': jnp.int32}
        elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            _lowerCAmelCase : Any = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
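# Hedged usage sketch (not executed here): the formatter above is what backs
# `Dataset.with_format("jax")` in the `datasets` library, so numeric columns come
# back as `jax.numpy` arrays on the requested device instead of Python lists.
def _demo_jax_format():
    from datasets import Dataset

    ds = Dataset.from_dict({'x': [[1.0, 2.0], [3.0, 4.0]]}).with_format('jax')
    return ds[0]['x']  # a jnp array rather than a nested Python list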
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
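# Hedged illustration (assumes the standard transformers lazy-import behaviour):
# with the _LazyModule indirection above, a submodule such as `modeling_roc_bert`
# is only imported the first time one of its symbols is actually accessed.
def _demo_lazy_access():
    from transformers import RoCBertConfig  # resolved lazily on first access

    return RoCBertConfig()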
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
        _lowerCAmelCase : Dict = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
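# Hedged, self-contained sketch of the spherical interpolation (slerp) implemented
# above: interpolate along the great circle between two flattened noise tensors,
# which keeps intermediate points at a comparable norm. Shapes are illustrative.
def _demo_slerp(alpha=0.5):
    import torch
    from math import acos, sin

    xa, xb = torch.randn(8), torch.randn(8)
    theta = acos(torch.dot(xa, xb) / (torch.norm(xa) * torch.norm(xb)))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)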
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=7 ,_A=True ,_A=True ,_A=False ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : Dict = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Dict = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[Any] = None
if self.use_input_mask:
_lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_A ,initializer_range=self.initializer_range ,pad_token_id=1 ,new_decoder_architecture=_A ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FalconModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A )
_lowerCAmelCase : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : str = FalconModel(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)
_lowerCAmelCase : Union[str, Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,)
_lowerCAmelCase : Optional[Any] = model(_A ,attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = True
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : int = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
_lowerCAmelCase : Optional[int] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,use_cache=_A ,)
_lowerCAmelCase : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
_lowerCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
_lowerCAmelCase : Dict = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowerCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
_lowerCAmelCase : str = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,output_hidden_states=_A ,)['hidden_states'][0]
_lowerCAmelCase : Optional[Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,past_key_values=_A ,output_hidden_states=_A ,)['hidden_states'][0]
# select random slice
_lowerCAmelCase : List[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A ,_A ,atol=1E-3 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
        ((_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase), (_lowerCAmelCase)) : int = config_and_inputs
_lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = FalconModelTester(self )
_lowerCAmelCase : int = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, *_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowerCAmelCase : Optional[int] = alibi
self.model_tester.create_and_check_model(_A ,*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[Any] = input_dict['input_ids']
_lowerCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = 3
_lowerCAmelCase : Dict = 'single_label_classification'
_lowerCAmelCase : Union[str, Any] = input_dict['input_ids']
_lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[str] = input_dict['input_ids']
_lowerCAmelCase : int = FalconForCausalLM(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A ,use_cache=_A )
_lowerCAmelCase : str = input_ids.shape[0]
_lowerCAmelCase : Optional[Any] = model._convert_to_rw_cache(result.past_key_values )
_lowerCAmelCase : int = model._convert_cache_to_standard_format(_A ,_A )
for layer in range(len(_A ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = 3
_lowerCAmelCase : Optional[Any] = 'multi_label_classification'
_lowerCAmelCase : List[str] = input_dict['input_ids']
_lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCAmelCase : Tuple = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_A ,'use_cache' ):
return
_lowerCAmelCase : Dict = model_class(_A ).to(_A )
if "use_cache" not in inputs:
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Any = model(**_A )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCAmelCase : List[Any] = (
getattr(_A ,'decoder_layers' ,_A )
or getattr(_A ,'num_decoder_layers' ,_A )
or config.num_hidden_layers
)
_lowerCAmelCase : Dict = getattr(_A ,'num_kv_heads' ,config.num_attention_heads )
_lowerCAmelCase : Any = getattr(_A ,'d_model' ,config.hidden_size )
_lowerCAmelCase : Dict = embed_dim // num_attention_heads
_lowerCAmelCase : Optional[Any] = outputs['past_key_values']
self.assertEqual(len(_A ) ,_A )
_lowerCAmelCase, _lowerCAmelCase : Any = inputs['input_ids'].shape
for i in range(_A ):
if config.new_decoder_architecture:
_lowerCAmelCase : int = config.num_attention_heads
elif config.multi_query:
_lowerCAmelCase : Tuple = 1
self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
_lowerCAmelCase : int = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(_A )
_lowerCAmelCase : str = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
_lowerCAmelCase : int = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
_lowerCAmelCase : Optional[int] = model.generate(**_A ,do_sample=_A ,max_new_tokens=19 )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(_A )[0]
self.assertEqual(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_A )
_lowerCAmelCase : Optional[Any] = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(_A )
_lowerCAmelCase : Optional[Any] = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_A ,do_sample=_A ,max_new_tokens=4 )
model.generate(**_A ,do_sample=_A ,max_new_tokens=4 )
model.generate(**_A ,num_beams=2 ,max_new_tokens=4 )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_A )
_lowerCAmelCase : List[Any] = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(device=_A )
_lowerCAmelCase : Optional[Any] = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
# Test results are the same with and without cache
_lowerCAmelCase : Optional[int] = model.generate(**_A ,do_sample=_A ,max_new_tokens=20 ,use_cache=_A )
_lowerCAmelCase : Union[str, Any] = model.generate(**_A ,do_sample=_A ,max_new_tokens=20 ,use_cache=_A )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
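# Hedged, self-contained sketch of the property the test above checks: greedy
# decoding with and without the KV cache must yield identical tokens. The model
# name mirrors the checkpoint used in the tests and downloading it is assumed.
def _demo_kv_cache_equivalence():
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
    model = AutoModelForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b').eval()
    inputs = tokenizer('My favorite food is', return_tensors='pt')
    with torch.no_grad():
        with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=True)
        without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=False)
    assert torch.equal(with_cache, without_cache)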
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
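# Hedged illustration: for a hypothetical symbol tuple, the helper above returns
# every adjacent pair; the BPE loop below repeatedly merges the best-ranked pair.
def _demo_get_pairs():
    word = ('l', 'o', 'w', 'e', 'r</w>')
    return lowerCamelCase__(word)  # {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}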
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
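# Hedged usage sketch (checkpoint name taken from the map above; downloading it is
# assumed): this tokenizer is decoder-oriented, so the typical path is turning
# generated ids back into text, with the `@@ ` continuation markers re-joined.
def _demo_decode():
    from transformers import Speech2Text2Tokenizer

    tokenizer = Speech2Text2Tokenizer.from_pretrained('facebook/s2t-wav2vec2-large-en-de')
    return tokenizer.decode([tokenizer.bos_token_id])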
| 16 | 1 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase = 10 , _lowerCamelCase = 2 ):
'''simple docstring'''
def get_dataset(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowerCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_lowerCAmelCase : Any = get_dataset(_lowerCamelCase )
_lowerCAmelCase : str = get_dataset(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = DataLoader(_lowerCamelCase , shuffle=_lowerCamelCase , batch_size=_lowerCamelCase , num_workers=4 )
_lowerCAmelCase : int = DataLoader(_lowerCamelCase , shuffle=_lowerCamelCase , batch_size=_lowerCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = []
for epoch in range(_lowerCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = batch
_lowerCAmelCase : Union[str, Any] = model(_lowerCamelCase )
_lowerCAmelCase : int = torch.nn.functional.mse_loss(_lowerCamelCase , _lowerCamelCase )
accelerator.backward(_lowerCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __UpperCamelCase ( nn.Module ):
def __init__( self ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Parameter(torch.randn(1 ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.randn(1 ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return x * self.a + self.b
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Union[str, Any] = DummyModel()
_lowerCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = dummy_dataloaders()
_lowerCAmelCase : str = ProjectConfiguration(total_limit=1 ,project_dir=_A ,automatic_checkpoint_naming=_A )
# Train baseline
_lowerCAmelCase : Any = Accelerator(project_config=_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = accelerator.prepare(
_A ,_A ,_A ,_A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : int = DummyModel()
_lowerCAmelCase : Any = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase, _lowerCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_lowerCAmelCase : Any = Accelerator()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = accelerator.prepare(
_A ,_A ,_A ,_A )
# Save initial
_lowerCAmelCase : Any = os.path.join(_A ,'initial' )
accelerator.save_state(_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : List[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Any = optimizer.state_dict()
_lowerCAmelCase : Any = train(3 ,_A ,_A ,_A ,_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : List[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCAmelCase : Dict = DummyModel()
_lowerCAmelCase : Any = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase, _lowerCAmelCase : int = dummy_dataloaders()
_lowerCAmelCase : str = Accelerator()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = accelerator.prepare(
_A ,_A ,_A ,_A )
accelerator.load_state(_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : List[str] = model.a.item(), model.b.item()
_lowerCAmelCase : Any = optimizer.state_dict()
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
_lowerCAmelCase : List[str] = train(2 ,_A ,_A ,_A ,_A )
# Save everything
_lowerCAmelCase : List[str] = os.path.join(_A ,'checkpoint' )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 ,_A ,_A ,_A ,_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : Tuple = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : int = DummyModel()
_lowerCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = dummy_dataloaders()
_lowerCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
_lowerCAmelCase : List[Any] = Accelerator(project_dir=_A ,project_config=_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = accelerator.prepare(
_A ,_A ,_A ,_A )
# Save initial
accelerator.save_state()
((_lowerCAmelCase), (_lowerCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
_lowerCAmelCase : Optional[Any] = train(3 ,_A ,_A ,_A ,_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : int = model.a.item(), model.b.item()
_lowerCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCAmelCase : List[Any] = DummyModel()
_lowerCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase, _lowerCAmelCase : Dict = dummy_dataloaders()
_lowerCAmelCase : Dict = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_A )
_lowerCAmelCase : List[str] = Accelerator(project_dir=_A ,project_config=_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = accelerator.prepare(
_A ,_A ,_A ,_A )
accelerator.load_state(os.path.join(_A ,'checkpoints' ,'checkpoint_0' ) )
((_lowerCAmelCase), (_lowerCAmelCase)) : List[str] = model.a.item(), model.b.item()
_lowerCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
_lowerCAmelCase : Dict = train(2 ,_A ,_A ,_A ,_A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A ,'checkpoints' ,'checkpoint_1' ) )
test_rands += train(1 ,_A ,_A ,_A ,_A )
((_lowerCAmelCase), (_lowerCAmelCase)) : str = model.a.item(), model.b.item()
_lowerCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
self.assertEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.tensor([1, 2, 3] )
_lowerCAmelCase : Union[str, Any] = torch.tensor([2, 3, 4] )
_lowerCAmelCase : str = DummyModel()
_lowerCAmelCase : Union[str, Any] = torch.optim.Adam(net.parameters() )
_lowerCAmelCase : Any = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A ,_A ,_A ,_A )
_lowerCAmelCase : int = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : str = DummyModel()
_lowerCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
_lowerCAmelCase : Any = torch.optim.lr_scheduler.StepLR(_A ,step_size=1 ,gamma=0.9_9 )
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = dummy_dataloaders()
_lowerCAmelCase : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
_lowerCAmelCase : Optional[Any] = Accelerator(project_dir=_A ,project_config=_A )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = accelerator.prepare(
_A ,_A ,_A ,_A ,_A )
# Save initial
accelerator.save_state()
_lowerCAmelCase : str = scheduler.state_dict()
train(3 ,_A ,_A ,_A ,_A ,_A )
self.assertNotEqual(_A ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A ,'checkpoints' ,'checkpoint_0' ) )
self.assertEqual(_A ,scheduler.state_dict() )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCAmelCase : Any = DummyModel()
_lowerCAmelCase : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A ,total_limit=2 )
# Train baseline
_lowerCAmelCase : int = Accelerator(project_dir=_A ,project_config=_A )
_lowerCAmelCase : Dict = accelerator.prepare(_A )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A ,'checkpoints' ,'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A ,'checkpoints' ,'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A ,'checkpoints' ,'checkpoint_10' ) ) )
@require_cuda
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = """/tmp/accelerate/state_checkpointing"""
_lowerCAmelCase = DummyModel()
_lowerCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase , _lowerCAmelCase = dummy_dataloaders()
_lowerCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
_lowerCAmelCase = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
_lowerCAmelCase = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
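# Hedged sketch (standard Accelerate API, not part of the test run above): any
# custom object can participate in `save_state`/`load_state` as long as it exposes
# `state_dict` and `load_state_dict`, which is what the ValueError test verifies.
def _demo_register_for_checkpointing():
    from accelerate import Accelerator

    class Counter:
        def __init__(self):
            self.n = 0

        def state_dict(self):
            return {'n': self.n}

        def load_state_dict(self, state):
            self.n = state['n']

    accelerator = Accelerator()
    counter = Counter()
    accelerator.register_for_checkpointing(counter)  # valid: has both hooks
    return counter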
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
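# Hedged, self-contained sketch of the encoder-decoder mask construction used
# above: the outer product of a query mask and a key mask marks which decoder
# positions may attend to which encoder positions. Shapes are illustrative.
def _demo_encoder_decoder_mask():
    import torch

    query_mask = torch.tensor([[1.0, 1.0, 0.0]])  # (batch, query_len)
    key_mask = torch.tensor([[1.0, 0.0]])  # (batch, key_len)
    mask = torch.mul(query_mask.unsqueeze(-1 ), key_mask.unsqueeze(-2 ) )
    return mask.unsqueeze(-3 )  # (batch, 1, query_len, key_len)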
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
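# Sanity check of the tanh GELU approximation above (left commented; it only
# illustrates the formula against torch's exact GELU):
# x = torch.linspace(-3.0, 3.0, 7)
# approx = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
# torch.allclose(approx, torch.nn.functional.gelu(x), atol=1e-3)  # -> True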
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
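# Standalone FiLM sketch mirroring the layer above (the shapes below are
# illustrative assumptions): a single linear layer predicts per-channel
# (scale, shift) from the conditioning embedding, and features are modulated
# as x * (1 + scale) + shift.
import torch

_film_cond = torch.randn(2, 1, 128)            # (batch, 1, in_features)
_film_proj = torch.nn.Linear(128, 2 * 32)      # out_features * 2, as in scale_bias above
_film_scale, _film_shift = torch.chunk(_film_proj(_film_cond), 2, dim=-1)
_film_x = torch.randn(2, 10, 32)
_film_x = _film_x * (1 + _film_scale) + _film_shift   # broadcasts over the sequence dim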
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowerCAmelCase : List[str] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
_lowerCAmelCase : Optional[Any] = str(bin(_lowerCamelCase ) )[2:]
_lowerCAmelCase : Optional[int] = max(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCamelCase ) , b_binary.zfill(_lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
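# Worked example for the bitwise-OR helper above, assuming distinct parameters
# a and b: bin(25) -> "0b11001" and bin(32) -> "0b100000"; OR-ing the
# zero-filled digit strings gives "0b111001", which matches 25 | 32 == 57.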
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
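# Minimal inference sketch mirroring the integration test above (hedged: the
# checkpoint id is assumed to be the single entry of the pretrained archive list):
# processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# logits = model(**processor(images=prepare_img(), return_tensors="tf")).logits  # (1, 1000)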
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return [ord(elem ) - 96 for elem in _lowerCamelCase]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , _lowerCamelCase )
print('Decoded:' , decode(_lowerCamelCase ) )
if __name__ == "__main__":
main()
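# Round-trip sketch for the a1z26 cipher above (letters map to their alphabet
# positions; main() calls the helpers as encode/decode):
# encode("hello") -> [8, 5, 12, 12, 15]
# decode([8, 5, 12, 12, 15]) -> "hello"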
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
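# Cross-check of the Gaussian-elimination solver on a 2x2 system, done with
# numpy so the check is self-contained (numpy availability is assumed here):
import numpy as np

# x + y = 3 and x - y = 1 have the unique solution x = 2, y = 1.
assert np.allclose(np.linalg.solve([[1.0, 1.0], [1.0, -1.0]], [3.0, 1.0]), [2.0, 1.0])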
| 16 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Union[str, Any] = np.zeros((n + 1,) )
_lowerCAmelCase : Union[str, Any] = ya
_lowerCAmelCase : Union[str, Any] = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
_lowerCAmelCase : Any = y[k] + (
(step_size / 2) * (ode_func(_lowerCamelCase , y[k] ) + ode_func(x + step_size , _lowerCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
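# Self-contained check of one predictor-corrector (Heun) step as implemented above:
# predictor y* = y + h*f(x, y); corrector y1 = y + (h/2)*(f(x, y) + f(x+h, y*)).
_f = lambda x, y: y          # dy/dx = y with y(0) = 1
_h, _y0 = 0.1, 1.0
_y_star = _y0 + _h * _f(0.0, _y0)
_y1 = _y0 + (_h / 2) * (_f(0.0, _y0) + _f(_h, _y_star))
assert abs(_y1 - 1.105) < 1e-9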
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
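# e.g. ord("中") == 0x4E2D lies in the CJK Unified Ideographs block
# [0x4E00, 0x9FFF] checked first above, while ord("a") == 0x61 falls outside
# every listed range.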
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
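# Illustration of the subword re-marking above: with chinese_word_set = {"中国"},
# the token list ["中", "国", "人"] becomes ["中", "##国", "人"] -- the piece
# inside a matched whole word receives the "##" continuation prefix.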
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save the positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _lowerCAmelCase : Dict = LTP(args.ltp ) # faster on a GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
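# Example invocation using the argparse defaults above (the script filename is
# assumed from the parser description; the resource paths must exist locally):
# python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#     --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt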
| 16 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0
if start < end:
_lowerCAmelCase : Optional[Any] = randint(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = a[end]
_lowerCAmelCase : int = a[pivot]
_lowerCAmelCase : List[str] = temp
_lowerCAmelCase, _lowerCAmelCase : Any = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = randint(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = a[end]
_lowerCAmelCase : Tuple = a[pivot]
_lowerCAmelCase : str = temp
_lowerCAmelCase : Tuple = start - 1
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_lowerCAmelCase : str = new_pivot_index + 1
_lowerCAmelCase : Tuple = a[new_pivot_index]
_lowerCAmelCase : Optional[Any] = a[index]
_lowerCAmelCase : int = temp
_lowerCAmelCase : str = a[new_pivot_index + 1]
_lowerCAmelCase : List[Any] = a[end]
_lowerCAmelCase : Tuple = temp
return new_pivot_index + 1, count
_lowerCAmelCase = TemporaryFile()
_lowerCAmelCase = 1_0_0 # 100 elements are to be sorted
_lowerCAmelCase , _lowerCAmelCase = 0, 1 # mean and standard deviation
_lowerCAmelCase = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
_lowerCAmelCase = np.load(outfile)
_lowerCAmelCase = len(M) - 1
_lowerCAmelCase = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
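# Self-contained illustration of the partition invariant used above: after one
# partition step, every element smaller than the pivot ends up to its left.
_arr = [3, 7, 1, 9, 4]
_pivot = _arr[-1]
_left = [v for v in _arr[:-1] if v < _pivot]
_right = [v for v in _arr[:-1] if v >= _pivot]
assert _left + [_pivot] + _right == [3, 1, 4, 7, 9]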
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
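# End-to-end usage mirrored by the tests above (left commented: it downloads the
# public CompVis checkpoint and is best run on a GPU):
# from diffusers import LDMTextToImagePipeline
# pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]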
| 16 | 1 |