Dataset column schema:

| column | type | range |
|---|---|---|
| code | string | lengths 81 to 54k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
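Each row of this dump pairs a `code` snippet with a `style_context` snippet, their two style-cluster ids, and a binary `label` marking whether the pair shares a coding style. A minimal sketch for inspecting one row, assuming the rows have been exported to a local JSON-lines file (the file name below is a placeholder, not part of this dataset):

from datasets import load_dataset

ds = load_dataset("json", data_files="code_style_pairs.jsonl", split="train")  # hypothetical file name
row = ds[0]
print(row["code"][:200])        # obfuscated source snippet
print(row["code_codestyle"])    # style-cluster id in [0, 721]
print(row["label"])             # 1 if `code` and `style_context` share a style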
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : int = tau * frequency / samplerate
snake_case_ : Any = sin(__UpperCamelCase )
snake_case_ : str = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : List[str] = (1 - _cos) / 2
snake_case_ : str = 1 - _cos
snake_case_ : Union[str, Any] = 1 + alpha
snake_case_ : Tuple = -2 * _cos
snake_case_ : int = 1 - alpha
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Any = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Tuple = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : str = (1 + _cos) / 2
snake_case_ : Union[str, Any] = -1 - _cos
snake_case_ : List[Any] = 1 + alpha
snake_case_ : str = -2 * _cos
snake_case_ : Optional[int] = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : str = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Any = cos(__UpperCamelCase )
snake_case_ : Optional[Any] = _sin / (2 * q_factor)
snake_case_ : str = _sin / 2
snake_case_ : List[Any] = 0
snake_case_ : Any = -ba
snake_case_ : Optional[int] = 1 + alpha
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : Any = 1 - alpha
snake_case_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : int = tau * frequency / samplerate
snake_case_ : str = sin(__UpperCamelCase )
snake_case_ : Optional[int] = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : List[str] = 1 - alpha
snake_case_ : Optional[Any] = -2 * _cos
snake_case_ : List[Any] = 1 + alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tau * frequency / samplerate
snake_case_ : Optional[Any] = sin(__UpperCamelCase )
snake_case_ : Tuple = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : Any = 1_0 ** (gain_db / 4_0)
snake_case_ : Optional[Any] = 1 + alpha * big_a
snake_case_ : Any = -2 * _cos
snake_case_ : Any = 1 - alpha * big_a
snake_case_ : Union[str, Any] = 1 + alpha / big_a
snake_case_ : List[str] = -2 * _cos
snake_case_ : int = 1 - alpha / big_a
snake_case_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : List[Any] = tau * frequency / samplerate
snake_case_ : List[str] = sin(__UpperCamelCase )
snake_case_ : Optional[int] = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : int = 1_0 ** (gain_db / 4_0)
snake_case_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : int = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : Any = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : List[str] = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[int] = big_a * (pmc + aaa)
snake_case_ : Optional[Any] = 2 * big_a * mpc
snake_case_ : Union[str, Any] = big_a * (pmc - aaa)
snake_case_ : Dict = ppmc + aaa
snake_case_ : str = -2 * pmpc
snake_case_ : str = ppmc - aaa
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Dict = tau * frequency / samplerate
snake_case_ : int = sin(__UpperCamelCase )
snake_case_ : List[Any] = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : Dict = 1_0 ** (gain_db / 4_0)
snake_case_ : Dict = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : Any = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : List[str] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : int = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[Any] = big_a * (ppmc + aaa)
snake_case_ : List[str] = -2 * big_a * pmpc
snake_case_ : Any = big_a * (ppmc - aaa)
snake_case_ : Optional[Any] = pmc + aaa
snake_case_ : Optional[Any] = 2 * mpc
snake_case_ : str = pmc - aaa
snake_case_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
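A short usage sketch for the filters above, assuming `IIRFilter` exposes a per-sample `process` method as in the TheAlgorithms `audio_filters` package (verify against your copy of `iir_filter.py`):

from math import sin, tau

# Build a 440 Hz low-pass biquad for 48 kHz audio and run a 1 kHz test tone through it.
filt = make_lowpass(440, 48_000)
samples = [sin(tau * 1_000 * n / 48_000) for n in range(480)]  # 10 ms of tone
filtered = [filt.process(s) for s in samples]  # assumption: IIRFilter.process(sample) -> float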
| code_codestyle: 21 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename the patch-embedding weights of stage `idx` to HF naming."""
    embed = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """Rename the attention-block weights of stage `idx`, block `cnt` to HF naming."""
    attention_weights = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Rename the classification token of stage `idx`."""
    token = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def final():
    """Rename the final layernorm and classifier head weights."""
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into the HuggingFace Transformers format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # assumption: the processor stores its resize edge under size["shortest_edge"]
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
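A hedged example of driving the converter from Python instead of the CLI; the checkpoint path is a placeholder for a real CvT weight file (see the download link in the comment above):

# Hypothetical invocation; download an original CvT checkpoint first.
convert_cvt_checkpoint(
    cvt_model="cvt-w24",
    image_size=384,
    cvt_file_name="cvtmodels/CvT-w24-384x384-IN-22k.pth",
    pytorch_dump_folder_path="./cvt-w24-384-converted",
)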
| style_context_codestyle: 21 | label: 1 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    """Pick a single (short or, failing that, long) answer annotation from an NQ example."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Build the plain-text context and locate the answer span for one NQ example."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Split a long context into overlapping windows and map the answer into each window."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Map one NQ example to strided model inputs (used with `datasets.map`)."""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append flattened (input_ids, start/end, category) samples to a jsonl file."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the null samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__lowerCAmelCase : Tuple = load_dataset('''natural_questions''')
__lowerCAmelCase : Optional[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
__lowerCAmelCase : Any = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
__lowerCAmelCase : Tuple = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
__lowerCAmelCase : Optional[int] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__lowerCAmelCase : int = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
__lowerCAmelCase : str = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
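A minimal sketch for streaming the flattened samples back from the jsonl file written above; the field names follow `save_to_disk`:

import jsonlines

with jsonlines.open("nq-validation.jsonl") as reader:
    for sample in reader:
        print(len(sample["input_ids"]), sample["category"])  # token count and mapped category id
        break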
| code_codestyle: 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the MGP-STR tokenizer."""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
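A minimal usage sketch of the tokenizer under test; the hub checkpoint id is an assumption (MGP-STR is character-level and lower-cases its input):

from transformers import MgpstrTokenizer

tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")  # assumed checkpoint id
ids = tok("tester").input_ids
print(ids, tok.decode(ids))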
| style_context_codestyle: 21 | label: 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model."""

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
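A minimal sketch instantiating the config above; `attribute_map` makes the generic attribute names resolve to the GPTSAN-specific ones:

config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2, num_ext_layers=0)
print(config.hidden_size)        # 512, alias of d_model via attribute_map
print(config.num_hidden_layers)  # 2, alias of num_layers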
| code_codestyle: 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds RoFormer configs and random inputs for the Flax tests."""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common model tests for the Flax RoFormer classes."""

    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Integration tests against pretrained RoFormer checkpoints."""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
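A minimal inference sketch mirroring the integration test above, using the same checkpoint:

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, vocab_size), as asserted in the test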
| style_context_codestyle: 21 | label: 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """Builds configs and inputs for the Mask2Former tests."""
def __init__( self , _lowercase , _lowercase=2 , _lowercase=True , _lowercase=False , _lowercase=1_0 , _lowercase=3 , _lowercase=3_2 * 8 , _lowercase=3_2 * 8 , _lowercase=4 , _lowercase=6_4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : List[str] = is_training
snake_case_ : int = use_auxiliary_loss
snake_case_ : List[str] = num_queries
snake_case_ : Tuple = num_channels
snake_case_ : Any = min_size
snake_case_ : List[Any] = max_size
snake_case_ : List[str] = num_labels
snake_case_ : Optional[Any] = hidden_dim
snake_case_ : Any = hidden_dim
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowercase )
snake_case_ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowercase )
snake_case_ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowercase ) > 0.5
).float()
snake_case_ : Any = (torch.rand((self.batch_size, self.num_labels) , device=_lowercase ) > 0.5).long()
snake_case_ : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
snake_case_ : List[Any] = self.num_queries
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Union[str, Any] = [1, 1, 1, 1]
snake_case_ : str = self.num_channels
snake_case_ : Any = 6_4
snake_case_ : int = 1_2_8
snake_case_ : Optional[Any] = self.hidden_dim
snake_case_ : Any = self.hidden_dim
snake_case_ : str = self.hidden_dim
return config
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ : Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = output.encoder_hidden_states
snake_case_ : Dict = output.pixel_decoder_hidden_states
snake_case_ : List[str] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , config.decoder_layers )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase=False ) -> int:
'''simple docstring'''
with torch.no_grad():
snake_case_ : Dict = MaskaFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Optional[int] = model(pixel_values=_lowercase , pixel_mask=_lowercase )
snake_case_ : Optional[Any] = model(_lowercase , output_hidden_states=_lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : List[str] = MaskaFormerForUniversalSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
def comm_check_on_output(_lowercase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ : List[str] = model(pixel_values=_lowercase , pixel_mask=_lowercase )
snake_case_ : List[Any] = model(_lowercase )
comm_check_on_output(_lowercase )
snake_case_ : Optional[Any] = model(
pixel_values=_lowercase , pixel_mask=_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
comm_check_on_output(_lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the Mask2Former classes."""

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = MaskaFormerModelTester(self )
snake_case_ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowercase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(_lowercase )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : str = [*signature.parameters.keys()]
snake_case_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
snake_case_ : Tuple = MaskaFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = (self.model_tester.min_size,) * 2
snake_case_ : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_lowercase ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=_lowercase ),
"""class_labels""": torch.zeros(2 , 1_0 , device=_lowercase ).long(),
}
snake_case_ : int = self.model_tester.get_config()
snake_case_ : int = MaskaFormerForUniversalSegmentation(_lowercase ).to(_lowercase )
snake_case_ : List[str] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_lowercase ).to(_lowercase )
snake_case_ : Optional[int] = model(**_lowercase , output_attentions=_lowercase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
snake_case_ : int = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
snake_case_ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.train()
snake_case_ : Optional[Any] = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase ).loss
loss.backward()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
snake_case_ : str = True
snake_case_ : List[str] = True
snake_case_ : str = model_class(_lowercase ).to(_lowercase )
model.train()
snake_case_ : Dict = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
snake_case_ : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
snake_case_ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    """Integration tests against the pretrained Mask2Former checkpoint."""
@cached_property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowercase )
snake_case_ : List[str] = self.default_image_processor
snake_case_ : List[str] = prepare_img()
snake_case_ : Any = image_processor(_lowercase , return_tensors="""pt""" ).to(_lowercase )
snake_case_ : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**_lowercase )
snake_case_ : Optional[int] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
snake_case_ : List[str] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
snake_case_ : List[str] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowercase , atol=_lowercase ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
snake_case_ : str = self.default_image_processor
snake_case_ : Any = prepare_img()
snake_case_ : str = image_processor(_lowercase , return_tensors="""pt""" ).to(_lowercase )
snake_case_ : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
snake_case_ : List[str] = model(**_lowercase )
# masks_queries_logits
snake_case_ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
snake_case_ : Union[str, Any] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
snake_case_ : int = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
snake_case_ : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
snake_case_ : int = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
snake_case_ : Union[str, Any] = self.default_image_processor
snake_case_ : List[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
snake_case_ : List[Any] = inputs["""pixel_values"""].to(_lowercase )
snake_case_ : Optional[int] = [el.to(_lowercase ) for el in inputs["""mask_labels"""]]
snake_case_ : Any = [el.to(_lowercase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
snake_case_ : Optional[Any] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
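A minimal inference sketch with the public Mask2Former classes (this dump aliases them as `MaskaFormer*`); the image path follows the fixture used above:

import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

ckpt = "facebook/mask2former-swin-small-coco-instance"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = Mask2FormerForUniversalSegmentation.from_pretrained(ckpt)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    outputs = model(**processor(image, return_tensors="pt"))
print(outputs.masks_queries_logits.shape)  # (1, num_queries, H/4, W/4)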
| code_codestyle: 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
    with py7zr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
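# Illustrative round-trip check (assumption, not part of the original conftest; the
# fixture above is assumed to be exposed to tests as `sqlite_path`): read the rows back.
def _read_sqlite_rows(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        return con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()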
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
    import bz2
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
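# Illustrative sketch (not part of the original conftest): the parquet file written
# above can be read back with pyarrow to verify the schema and row count.
def _check_parquet_roundtrip(parquet_path):
    table = pq.read_table(parquet_path)
    assert table.num_rows == len(DATA)
    assert table.column_names == ["col_1", "col_2", "col_3"]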
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory : Any ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
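# Minimal sketch (illustrative, not part of the original conftest): data-file
# resolution is expected to skip the hidden file and hidden directory created above.
def _visible_text_files(data_dir):
    # expected result: ["test.txt", "train.txt"]
    return sorted(p.name for p in (data_dir / "subdir").iterdir() if not p.name.startswith("."))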
| 21 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps: int , max_beta: float = 0.999 , alpha_transform_type: str = "cosine" , ):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
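# Quick sanity sketch (illustrative, not part of the original module): betas from the
# cosine schedule are positive and the cumulative alphas decay over the trajectory.
def _demo_alpha_bar_schedule():
    betas = betas_for_alpha_bar(1_0)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    assert (betas > 0).all() and alphas_cumprod[-1] < alphas_cumprod[0]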
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , num_train_timesteps: int = 1_0_0_0 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ) -> None:
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> None:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output , timestep , sample , prev_timestep = None , generator=None , return_dict = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                """ for the UnCLIPScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    """ for the UnCLIPScheduler.""" )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
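# Minimal usage sketch (illustrative; the shapes and the random "model output" are
# placeholders, not a real UnCLIP model): one reverse-diffusion step with the scheduler.
def _demo_unclip_step():
    scheduler = UnCLIPScheduler(num_train_timesteps=1_0_0_0)
    scheduler.set_timesteps(2_5, device="cpu")
    sample = torch.randn(1, 3, 6_4, 6_4)
    model_output = torch.randn(1, 3, 6_4, 6_4)
    t = scheduler.timesteps[0]
    generator = torch.Generator().manual_seed(0)
    return scheduler.step(model_output, t, sample, generator=generator).prev_sample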
| 21 |
"""simple docstring"""
def binary_xor( a: int , b: int ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
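# Example usage (illustrative): 25 = 0b011001 and 32 = 0b100000, so the XOR is 0b111001.
def _demo_binary_xor():
    assert binary_xor(2_5, 3_2) == "0b111001"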
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
"""simple docstring"""
    def __init__( self , parent , vocab_size=1_0_0 , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 1_0_0
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> None:
        '''simple docstring'''
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels , pixel_labels ) -> None:
        '''simple docstring'''
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> None:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ) -> None:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(_lowercase )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_lowercase ), BeitForMaskedImageModeling]:
continue
snake_case_ : Optional[int] = model_class(_lowercase )
model.to(_lowercase )
model.train()
snake_case_ : Tuple = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
snake_case_ : str = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ : Dict = False
snake_case_ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_lowercase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ : int = model_class(_lowercase )
model.gradient_checkpointing_enable()
model.to(_lowercase )
model.train()
snake_case_ : List[Any] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
snake_case_ : List[Any] = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class(config=_lowercase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = BeitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(_lowercase )
snake_case_ : str = self.default_image_processor
snake_case_ : int = prepare_img()
snake_case_ : Optional[int] = image_processor(images=_lowercase , return_tensors="""pt""" ).pixel_values.to(_lowercase )
# prepare bool_masked_pos
snake_case_ : Optional[Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(_lowercase )
# forward pass
with torch.no_grad():
snake_case_ : Dict = model(pixel_values=_lowercase , bool_masked_pos=_lowercase )
snake_case_ : str = outputs.logits
# verify the logits
snake_case_ : str = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , _lowercase )
snake_case_ : int = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowercase , atol=1E-2 ) )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(_lowercase )
snake_case_ : Tuple = self.default_image_processor
snake_case_ : Any = prepare_img()
snake_case_ : Dict = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**_lowercase )
snake_case_ : Dict = outputs.logits
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , _lowercase )
snake_case_ : Any = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowercase , atol=1E-4 ) )
snake_case_ : int = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
_lowercase )
snake_case_ : int = self.default_image_processor
snake_case_ : int = prepare_img()
snake_case_ : Union[str, Any] = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
snake_case_ : Any = model(**_lowercase )
snake_case_ : str = outputs.logits
# verify the logits
snake_case_ : List[str] = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , _lowercase )
snake_case_ : Union[str, Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowercase , atol=1E-4 ) )
snake_case_ : Dict = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : str = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case_ : int = model.to(_lowercase )
snake_case_ : Any = BeitImageProcessor(do_resize=_lowercase , size=6_4_0 , do_center_crop=_lowercase )
snake_case_ : List[str] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case_ : Tuple = Image.open(ds[0]["""file"""] )
snake_case_ : Any = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
snake_case_ : List[str] = model(**_lowercase )
snake_case_ : str = outputs.logits
# verify the logits
snake_case_ : Optional[Any] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , _lowercase )
snake_case_ : int = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
snake_case_ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_lowercase , )
else:
snake_case_ : List[str] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case_ : Optional[Any] = model.to(_lowercase )
snake_case_ : List[Any] = BeitImageProcessor(do_resize=_lowercase , size=6_4_0 , do_center_crop=_lowercase )
snake_case_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case_ : Any = Image.open(ds[0]["""file"""] )
snake_case_ : Optional[int] = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
snake_case_ : Optional[Any] = model(**_lowercase )
snake_case_ : str = outputs.logits.detach().cpu()
snake_case_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(5_0_0, 3_0_0)] )
snake_case_ : int = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , _lowercase )
snake_case_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowercase )
snake_case_ : Optional[Any] = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , _lowercase )
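# Illustrative helper (assumption, not part of the original tests): upsampling raw
# segmentation logits to a target size mirrors what
# `post_process_semantic_segmentation` does internally before taking the argmax.
def _upsample_logits(logits, target_size):
    import torch.nn.functional as F
    return F.interpolate(logits, size=target_size, mode="bilinear", align_corners=False).argmax(dim=1)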
| 21 |
"""simple docstring"""
def reverse_words( input_str: str ) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """simple docstring"""
    def __init__( self , scheduler , optimizers , step_with_optimizer: bool = True , split_batches: bool = False ) -> None:
        '''simple docstring'''
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step( self , *args , **kwargs ):
        '''simple docstring'''
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , """total_steps""" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr( self ):
        '''simple docstring'''
        return self.scheduler.get_last_lr()
    def state_dict( self ):
        '''simple docstring'''
        return self.scheduler.state_dict()
    def load_state_dict( self , state_dict ):
        '''simple docstring'''
        self.scheduler.load_state_dict(state_dict )
    def get_lr( self ):
        '''simple docstring'''
        return self.scheduler.get_lr()
    def print_lr( self , *args , **kwargs ):
        '''simple docstring'''
        return self.scheduler.print_lr(*args , **kwargs )
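# Minimal usage sketch (illustrative; assumes a plain torch optimizer/scheduler pair
# and that `GradientState` can be instantiated outside a full Accelerate setup).
def _demo_accelerated_scheduler():
    import torch
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
    return AcceleratedScheduler(lr_scheduler, optimizer)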
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = '''nat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=6_4 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 1_6] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
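# Illustrative sketch (not part of the original file): instantiating the config with
# defaults and inspecting the attributes derived in `__init__`.
def _demo_nat_config():
    config = NatConfig()
    return config.num_layers, config.hidden_size, config.stage_names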
| 21 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length: int = 8 ) -> str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl: str , i: int ) -> str:
    '''simple docstring'''
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    chars = list(chars )
    shuffle(chars )
    return "".join(chars )
# random is a generalised function for letters, characters and numbers
def random( chars: str , number_of_chars: int ) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(chars ) for _ in range(number_of_chars ) )
def random_number( chars_incl: str , i: int ):
    '''simple docstring'''
    pass  # Put your code here...
def random_letters( chars_incl: str , i: int ):
    '''simple docstring'''
    pass  # Put your code here...
def random_characters( chars_incl: str , i: int ):
    '''simple docstring'''
    pass  # Put your code here...
def is_strong_password( password: str , min_length: int = 8 ) -> bool:
    '''simple docstring'''
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
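# Example check (illustrative):
def _demo_is_strong_password():
    assert is_strong_password("""Secur3!pass""" )
    assert not is_strong_password("""weak""" )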
def main():
    '''simple docstring'''
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length ) , )
    print("""[If you are thinking of using this password, you had better save it.]""" )
if __name__ == "__main__":
main()
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
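# A worked illustration (my own sketch, not part of the original script): for
# checkpoint_version >= 2.0, a fused QKV weight stored as
# [num_heads * num_splits * per_head_hidden, :] is viewed as
# (num_heads, num_splits, per_head_hidden, ...), the first two axes are swapped,
# and the result is flattened back to [num_splits * num_heads * per_head_hidden, :],
# grouping all Q rows first, then K, then V -- the ordering the transformers GPT-2 code expects.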
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
# For the LM head, transformers wants the matrix tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip extension is optional; keep supporting it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
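# Note (my own annotation): these defaults (n_embd=1024, n_layer=24, n_head=16) correspond to the medium-sized (~345M parameter) GPT-2 model that NVIDIA released for Megatron-LM.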
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
__lowerCAmelCase : Dict = logging.getLogger(__name__)
__lowerCAmelCase : Tuple = 50 # max width of layer names
__lowerCAmelCase : List[str] = 70 # max width of quantizer names
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=__UpperCamelCase , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=__UpperCamelCase , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=__UpperCamelCase , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=__UpperCamelCase , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=__UpperCamelCase , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=__UpperCamelCase , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if args.calibrator == "max":
snake_case_ : Dict = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
snake_case_ : List[Any] = """histogram"""
elif args.calibrator == "mse":
snake_case_ : List[str] = """histogram"""
else:
raise ValueError(F'Invalid calibrator {args.calibrator}' )
snake_case_ : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=__UpperCamelCase )
snake_case_ : Union[str, Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__UpperCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[Any]=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(F'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__UpperCamelCase , ["""embeddings"""] , which="""weight""" , _disabled=__UpperCamelCase )
if args.quant_disable:
set_quantizer_by_name(__UpperCamelCase , [""""""] , _disabled=__UpperCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(__UpperCamelCase , args.quant_disable_keyword , _disabled=__UpperCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(__UpperCamelCase , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=__UpperCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(__UpperCamelCase , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=__UpperCamelCase )
if args.recalibrate_weights:
recalibrate_weights(__UpperCamelCase )
if args.fuse_qkv:
fuse_qkv(__UpperCamelCase , __UpperCamelCase )
if args.clip_gelu:
clip_gelu(__UpperCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'{name:80}: {module}' )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
def fusea(__UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple ):
for mod in [qq, qk, qv]:
if not hasattr(__UpperCamelCase , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
snake_case_ : Optional[Any] = qq._amax.detach().item()
snake_case_ : Dict = qk._amax.detach().item()
snake_case_ : str = qv._amax.detach().item()
snake_case_ : List[str] = max(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
qq._amax.fill_(__UpperCamelCase )
qk._amax.fill_(__UpperCamelCase )
qv._amax.fill_(__UpperCamelCase )
logger.info(F' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
snake_case_ : Tuple = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__UpperCamelCase )
snake_case_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(F'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
snake_case_ : Union[str, Any] = mod.weight.shape[0]
snake_case_ : List[str] = mod._weight_quantizer._amax.detach()
snake_case_ : int = torch.ones(__UpperCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
snake_case_ : Optional[int] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
snake_case_ : Any = set(range(len(mod.weight.size() ) ) ) - axis_set
snake_case_ : Optional[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__UpperCamelCase , keepdims=__UpperCamelCase ).detach()
logger.info(F'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
snake_case_ : List[Any] = amax
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str]=2_5 , __UpperCamelCase : Dict=1_8_0 , __UpperCamelCase : Optional[Any]=None ):
'''simple docstring'''
if ignore is None:
snake_case_ : Any = []
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Union[str, Any] = [ignore]
snake_case_ : int = 0
for name, mod in model.named_modules():
if not hasattr(__UpperCamelCase , """weight""" ):
continue
snake_case_ : Dict = max(__UpperCamelCase , len(__UpperCamelCase ) )
for name, mod in model.named_modules():
snake_case_ : Tuple = getattr(__UpperCamelCase , """_input_quantizer""" , __UpperCamelCase )
snake_case_ : Dict = getattr(__UpperCamelCase , """_weight_quantizer""" , __UpperCamelCase )
if not hasattr(__UpperCamelCase , """weight""" ):
continue
if type(__UpperCamelCase ) in ignore:
continue
if [True for s in ignore if type(__UpperCamelCase ) is str and s in name]:
continue
snake_case_ : Tuple = F'Act:{input_q.extra_repr()}'
snake_case_ : Tuple = F'Wgt:{weight_q.extra_repr()}'
snake_case_ : Union[str, Any] = F'{name:{name_width}} {act_str} {wgt_str}'
if len(__UpperCamelCase ) <= line_width:
logger.info(__UpperCamelCase )
else:
logger.info(F'{name:{name_width}} {act_str}' )
logger.info(F'{" ":{name_width}} {wgt_str}' )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[Any] = 0
for name, mod in model.named_modules():
if isinstance(__UpperCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F'{name:80} {mod}' )
count += 1
print(F'{count} TensorQuantizers found in model' )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Dict = getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if quantizer_mod is not None:
assert hasattr(__UpperCamelCase , __UpperCamelCase )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
logger.warning(F'{name} has no {quantizer}' )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]="both" , **__UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = F'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
if which in ["input", "both"]:
set_quantizer(__UpperCamelCase , __UpperCamelCase , """_input_quantizer""" , __UpperCamelCase , __UpperCamelCase )
if which in ["weight", "both"]:
set_quantizer(__UpperCamelCase , __UpperCamelCase , """_weight_quantizer""" , __UpperCamelCase , __UpperCamelCase )
logger.info(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , **__UpperCamelCase : int ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_input_quantizer""" ) or hasattr(__UpperCamelCase , """_weight_quantizer""" ):
for n in names:
if re.search(__UpperCamelCase , __UpperCamelCase ):
set_quantizers(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
logger.info(__UpperCamelCase )
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
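# Exact GELU: x * Phi(x), where Phi is the standard normal CDF, i.e. 0.5 * (1 + erf(x / sqrt(2))).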
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
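# Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))).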
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
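# A minimal usage sketch (my own example, assuming TF 2.x with eager execution):
#
# import tensorflow as tf
# act = ACTaFN["gelu"]
# act(tf.constant([-1.0, 0.0, 1.0])) # -> tensor of GELU-transformed values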
| 21 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''encoder-decoder'''
_lowerCamelCase = True
def __init__( self , **_lowercase ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : int = kwargs.pop("""encoder""" )
snake_case_ : List[str] = encoder_config.pop("""model_type""" )
snake_case_ : Optional[int] = kwargs.pop("""decoder""" )
snake_case_ : Tuple = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Tuple = AutoConfig.for_model(_lowercase , **_lowercase )
snake_case_ : str = AutoConfig.for_model(_lowercase , **_lowercase )
snake_case_ : Optional[Any] = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , _lowercase , **_lowercase ) -> PretrainedConfig:
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case_ : Union[str, Any] = True
snake_case_ : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Dict = self.encoder.to_dict()
snake_case_ : Any = self.decoder.to_dict()
snake_case_ : Optional[Any] = self.__class__.model_type
return output
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
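# For the adjacency list above this prints 5: the longest path, counted in nodes, is e.g. 0 -> 2 -> 5 -> 6 -> 7.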
| 21 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 2_0, """a """ * 3_0, """b """ * 7],
}
snake_case_ : Union[str, Any] = Dataset.from_dict(__UpperCamelCase )
return dataset
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = get_dataset()
snake_case_ : Optional[Any] = make_duplicate_clusters(_lowercase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = get_dataset()
snake_case_ , snake_case_ : Union[str, Any] = deduplicate_dataset(_lowercase )
self.assertEqual(len(_lowercase ) , 2 )
print(_lowercase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , _lowercase )
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
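# e.g. for node_count = 3 this yields C(6, 3) // 4 == 20 // 4 == 5 distinct binary search trees.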
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
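# e.g. with 3 nodes there are 5 binary search trees and 5 * 3! == 30 labelled binary trees.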
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''xlm-roberta-xl'''
def __init__( self , _lowercase=2_5_0_8_8_0 , _lowercase=2_5_6_0 , _lowercase=3_6 , _lowercase=3_2 , _lowercase=1_0_2_4_0 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_4 , _lowercase=1 , _lowercase=0.02 , _lowercase=1E-05 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : str = type_vocab_size
snake_case_ : Optional[int] = initializer_range
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[str] = position_embedding_type
snake_case_ : List[str] = use_cache
snake_case_ : Optional[int] = classifier_dropout
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
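# e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2] this gives 96 * 2**3 == 768.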
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowerCAmelCase : Tuple = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__lowerCAmelCase : str = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
snake_case_ : Optional[Any] = bs[:]
snake_case_ : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
snake_case_ : List[Any] = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
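# e.g. the byte for a space (32) maps to 'Ġ' (chr(288)), the word-boundary marker used by GPT-2-style BPE vocabularies.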
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Any = set()
snake_case_ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : Optional[Any] = char
return pairs
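# e.g. for the word ("l", "o", "w") this returns {("l", "o"), ("o", "w")}.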
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
snake_case_ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
snake_case_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : str = json.load(_lowercase )
snake_case_ : Optional[int] = {v: k for k, v in self.encoder.items()}
snake_case_ : List[str] = errors # how to handle errors in decoding
snake_case_ : Any = bytes_to_unicode()
snake_case_ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
snake_case_ : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Any = {}
snake_case_ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : List[str] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case_ : str = tuple(_lowercase )
snake_case_ : Optional[int] = get_pairs(_lowercase )
if not pairs:
return token
while True:
snake_case_ : str = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : Tuple = bigram
snake_case_ : Any = []
snake_case_ : List[str] = 0
while i < len(_lowercase ):
try:
snake_case_ : Any = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : Optional[int] = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Optional[int] = tuple(_lowercase )
snake_case_ : int = new_word
if len(_lowercase ) == 1:
break
else:
snake_case_ : List[Any] = get_pairs(_lowercase )
snake_case_ : Optional[int] = """ """.join(_lowercase )
snake_case_ : List[str] = word
return word
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = []
for token in re.findall(self.pat , _lowercase ):
snake_case_ : Dict = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
return self.decoder.get(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = """""".join(_lowercase )
snake_case_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : int = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Dict = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
snake_case_ : List[Any] = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
snake_case_ : Dict = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
snake_case_ : int = """ """ + text
return (text, kwargs)
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> str:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
snake_case_ : str = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix a space, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_lowercase )
snake_case_ : Tuple = """ """.join(_lowercase )
snake_case_ : Optional[int] = self.encode(_lowercase )
if len(_lowercase ) > self.model_max_length:
snake_case_ : str = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
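# Illustrative flow (comment only, not in the original file): user turns are
# prefixed with a space, all turns are space-joined and encoded in one pass,
# and the ids are truncated from the left so that the most recent
# conversation context is kept within model_max_length.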
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DDIMPipeline
_lowerCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
_lowerCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
snake_case_ : Optional[Any] = DDIMScheduler()
snake_case_ : Any = {"""unet""": unet, """scheduler""": scheduler}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : int = torch.manual_seed(_lowercase )
else:
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[int] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """cpu"""
snake_case_ : int = self.get_dummy_components()
snake_case_ : Tuple = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Any = self.get_dummy_inputs(_lowercase )
snake_case_ : Dict = pipe(**_lowercase ).images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
snake_case_ : Optional[int] = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
snake_case_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowercase , 1E-3 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = """google/ddpm-cifar10-32"""
snake_case_ : Optional[Any] = UNetaDModel.from_pretrained(_lowercase )
snake_case_ : int = DDIMScheduler()
snake_case_ : Union[str, Any] = DDIMPipeline(unet=_lowercase , scheduler=_lowercase )
ddim.to(_lowercase )
ddim.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = ddim(generator=_lowercase , eta=0.0 , output_type="""numpy""" ).images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case_ : Tuple = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = """google/ddpm-ema-bedroom-256"""
snake_case_ : Tuple = UNetaDModel.from_pretrained(_lowercase )
snake_case_ : List[Any] = DDIMScheduler.from_pretrained(_lowercase )
snake_case_ : List[str] = DDIMPipeline(unet=_lowercase , scheduler=_lowercase )
ddpm.to(_lowercase )
ddpm.set_progress_bar_config(disable=_lowercase )
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
snake_case_ : Any = ddpm(generator=_lowercase , output_type="""numpy""" ).images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
snake_case_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
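# Worked example (comment only, not in the original file): 49/98 is digit
# cancelling because num % 10 == den // 10 (the shared 9) and
# (num // 10) / (den % 10) = 4 / 8 = 0.5 = 49 / 98, so the predicate holds.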
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
snake_case_ : Optional[Any] = 1_1
snake_case_ : Any = int("""1""" + """0""" * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
snake_case_ : Optional[Any] = 1_0
return solutions
def __lowerCAmelCase ( __UpperCamelCase : int = 2 ):
'''simple docstring'''
snake_case_ : Dict = 1.0
for fraction in fraction_list(__UpperCamelCase ):
snake_case_ : Dict = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
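# Worked result (comment only, not in the original file): the four
# non-trivial two-digit fractions are 16/64, 19/95, 26/65 and 49/98; their
# product simplifies to 1/100, so the function returns 100 (Project Euler 33).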
if __name__ == "__main__":
print(solution())
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
- \'matthews_correlation\': Matthews Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
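# Note (comment only, not in the original file): every tuple in these rename
# helpers pairs a Hugging Face parameter name (left) with the matching key in
# the original CvT checkpoint (right); the conversion loop at the bottom of
# the file looks the right-hand name up in the downloaded state dict.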
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
# For wide CvT (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint file''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
__lowerCAmelCase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : List[str] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = word.split()
def justify(__UpperCamelCase : list , __UpperCamelCase : int , __UpperCamelCase : int ) -> str:
snake_case_ : int = max_width - width
snake_case_ : int = len(__UpperCamelCase )
if len(__UpperCamelCase ) == 1:
# if there is only one word in the line,
# just pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
snake_case_ : Union[str, Any] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
snake_case_ : Optional[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
snake_case_ : str = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__UpperCamelCase ):
num_spaces_between_words_list[i] += 1
snake_case_ : List[Any] = []
for i in range(__UpperCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__UpperCamelCase )
snake_case_ : Tuple = []
snake_case_ : list[str] = []
snake_case_ : List[str] = 0
for word in words:
if width + len(__UpperCamelCase ) + len(__UpperCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__UpperCamelCase )
width += len(__UpperCamelCase )
else:
# justify the line and add it to result
answer.append(justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
# reset new line and new width
snake_case_ , snake_case_ : Union[str, Any] = [word], len(__UpperCamelCase )
snake_case_ : Dict = max_width - width - len(__UpperCamelCase )
answer.append(""" """.join(__UpperCamelCase ) + (remaining_spaces + 1) * """ """ )
return answer
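# Worked example (comment only, not in the original file): applying the
# function above to ("This is an example of text justification.", 16) yields
# ['This    is    an', 'example  of text', 'justification.  '] -- interior
# gaps are distributed round-robin from the left, and the final line is
# left-justified and padded with trailing spaces to max_width.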
if __name__ == "__main__":
from doctest import testmod
testmod()
| 21 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Dict = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase : Optional[Any] = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = GPTaTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase=False , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
snake_case_ : Optional[Any] = kwargs.pop("""add_bos_token""" , _lowercase )
snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowercase ) != add_prefix_space:
snake_case_ : List[Any] = getattr(_lowercase , pre_tok_state.pop("""type""" ) )
snake_case_ : Tuple = add_prefix_space
snake_case_ : Optional[int] = pre_tok_class(**_lowercase )
snake_case_ : Optional[int] = add_prefix_space
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Dict = kwargs.get("""is_split_into_words""" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : List[str] = kwargs.get("""is_split_into_words""" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : int = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
snake_case_ : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowercase , add_special_tokens=_lowercase ) + [self.eos_token_id] )
if len(_lowercase ) > self.model_max_length:
snake_case_ : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
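# Minimal usage sketch (comment only, not in the original file; assumes the
# public "gpt2" checkpoint is reachable and uses the obfuscated class name
# defined above, whose from_pretrained comes from PreTrainedTokenizerFast):
# >>> tok = _lowerCAmelCase.from_pretrained("gpt2")
# >>> tok("hello world").input_ids  # BPE ids for "hello" and " world"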
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
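# Illustrative decomposition (comment only, not in the original file): for
# n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4; the witness loop
# then tests pow(prime, 35 * 2**r, 561) for r in range(4), and 561 (a
# Carmichael number) is correctly reported composite.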
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
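# For the adjacency list above this prints 5: the longest path, counted in
# vertices, is e.g. 0 -> 3 -> 5 -> 6 -> 7 (comment only, not in the original file).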
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The math module value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
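# Intuition (comment only, not in the original file): a uniform point in the
# square [-1, 1] x [-1, 1] lands inside the unit circle with probability
# (pi * 1**2) / (2 * 2) = pi / 4, which is why the proportion is scaled by 4.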
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
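# Illustrative one-off use (hypothetical values, comment only, not in the
# original file): sampling mean(f(U)) * (b - a) with f(x) = x * x over
# [0, 1] and 1000 draws should land near the true integral 1/3.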
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Optional[Any] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__lowerCAmelCase : Any = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
__lowerCAmelCase : List[Any] = '''▁'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = BigBirdTokenizer
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = []
def __init__( self , _lowercase=None , _lowercase=None , _lowercase="<unk>" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="<pad>" , _lowercase="[SEP]" , _lowercase="[MASK]" , _lowercase="[CLS]" , **_lowercase , ) -> int:
'''simple docstring'''
snake_case_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
snake_case_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
snake_case_ : Optional[Any] = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
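        # Big Bird uses the BERT-style sequence format: [CLS] A [SEP] for a
        # single sequence and [CLS] A [SEP] B [SEP] for a pair.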
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : Dict = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
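# The safety checker compares CLIP image embeddings against precomputed
# "concept" text embeddings. cosine_distance below L2-normalizes both inputs,
# so the matrix product yields a (num_images, num_concepts) matrix of cosine
# similarity scores.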
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
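    # The 17 NSFW concept embeddings and 3 "special care" embeddings above are
    # stored as fixed parameters together with per-concept threshold weights;
    # an image is flagged when its cosine similarity to a concept exceeds that
    # concept's threshold (plus an optional adjustment).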
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Any = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.dummy_uncond_unet
snake_case_ : Optional[Any] = KarrasVeScheduler()
snake_case_ : int = KarrasVePipeline(unet=_lowercase , scheduler=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = pipe(num_inference_steps=2 , generator=_lowercase , output_type="""numpy""" ).images
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : Optional[Any] = pipe(num_inference_steps=2 , generator=_lowercase , output_type="""numpy""" , return_dict=_lowercase )[0]
snake_case_ : Dict = image[0, -3:, -3:, -1]
snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case_ : Union[str, Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = """google/ncsnpp-celebahq-256"""
snake_case_ : Optional[int] = UNetaDModel.from_pretrained(_lowercase )
snake_case_ : Optional[int] = KarrasVeScheduler()
snake_case_ : Optional[int] = KarrasVePipeline(unet=_lowercase , scheduler=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = pipe(num_inference_steps=2_0 , generator=_lowercase , output_type="""numpy""" ).images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
snake_case_ : List[str] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
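# The conversion builds an explicit list of (new_name, old_name) pairs for
# every weight in the original checkpoint (embeddings, attention blocks, cls
# token, classification head), then copies each tensor from the original state
# dict into a HuggingFace CvtForImageClassification model.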
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
    # For wide CvT (similar to wide ResNet), depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
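    # Reuse the ConvNeXt image processor configuration, setting its size to the
    # CvT input resolution.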
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
        print(gg )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = (KDPMaDiscreteScheduler,)
_lowerCamelCase = 10
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_lowercase )
return config
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
snake_case_ : Union[str, Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Any = self.dummy_model()
snake_case_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Optional[int] = sample.to(_lowercase )
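        # Standard scheduler sampling loop: scale the sample for the current
        # timestep, run the model, then take one denoising step.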
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Tuple = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : List[str] = model(_lowercase , _lowercase )
snake_case_ : Optional[Any] = scheduler.step(_lowercase , _lowercase , _lowercase )
snake_case_ : int = output.prev_sample
snake_case_ : List[str] = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
if torch_device == "mps":
return
snake_case_ : Optional[Any] = self.scheduler_classes[0]
snake_case_ : str = self.get_scheduler_config()
snake_case_ : List[str] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : int = self.dummy_model()
snake_case_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : List[str] = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : List[Any] = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : str = model(_lowercase , _lowercase )
snake_case_ : str = scheduler.step(_lowercase , _lowercase , _lowercase )
snake_case_ : int = output.prev_sample
snake_case_ : int = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Optional[Any] = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if torch_device == "mps":
return
snake_case_ : Tuple = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config()
snake_case_ : Dict = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
snake_case_ : str = self.dummy_model()
snake_case_ : List[Any] = self.dummy_sample_deter.to(_lowercase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case_ : Tuple = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : List[Any] = model(_lowercase , _lowercase )
snake_case_ : str = scheduler.step(_lowercase , _lowercase , _lowercase )
snake_case_ : Optional[Any] = output.prev_sample
snake_case_ : int = torch.sum(torch.abs(_lowercase ) )
snake_case_ : str = torch.mean(torch.abs(_lowercase ) )
if str(_lowercase ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase : Tuple = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=None , _lowercase=True , _lowercase=True , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = size if size is not None else {"""height""": 2_0, """width""": 2_0}
snake_case_ : List[str] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = num_channels
snake_case_ : Dict = image_size
snake_case_ : List[str] = min_resolution
snake_case_ : str = max_resolution
snake_case_ : List[Any] = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Optional[Any] = do_convert_rgb
snake_case_ : Optional[int] = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
snake_case_ : List[str] = patch_size if patch_size is not None else {"""height""": 1_6, """width""": 1_6}
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
snake_case_ : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.image_processor_tester.prepare_dummy_image()
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : Dict = 2_0_4_8
snake_case_ : Any = image_processor(_lowercase , return_tensors="""pt""" , max_patches=_lowercase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
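        # expected_hidden_dim: patch_h * patch_w * channels pixel values per
        # patch, plus 2 extra entries for the patch's row and column indices.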
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Any = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Optional[int] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
snake_case_ : Dict = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_lowercase ):
snake_case_ : Dict = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
snake_case_ : Dict = """Hello"""
snake_case_ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Tuple = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
snake_case_ : int = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Tuple = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = PixaStructImageProcessingTester(self , num_channels=4 )
snake_case_ : Tuple = 3
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : int = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Dict = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Any = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
"""simple docstring"""
import argparse
import copy
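# Tabu search for the travelling salesman problem. The input file lists one
# weighted edge per line ("node_a node_b distance"), and the starting node is
# read as the first character of the file. A greedy nearest-neighbour tour is
# built first, then 2-swap neighbourhoods are explored while a tabu list
# forbids recently used swaps.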
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[Any] = {}
with open(__UpperCamelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : Dict = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : int = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : Optional[Any] = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
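# Builds the initial tour greedily: starting from the first node, repeatedly
# move to the cheapest unvisited neighbour, then close the tour back to the
# start node and correct the accumulated distance.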
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase ) as f:
snake_case_ : str = f.read(1 )
snake_case_ : Any = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : List[Any] = 0
while visiting not in first_solution:
snake_case_ : List[Any] = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__UpperCamelCase ) and k[0] not in first_solution:
snake_case_ : Dict = k[1]
snake_case_ : Optional[int] = k[0]
first_solution.append(__UpperCamelCase )
snake_case_ : List[Any] = distance_of_first_solution + int(__UpperCamelCase )
snake_case_ : Dict = best_node
first_solution.append(__UpperCamelCase )
snake_case_ : Dict = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
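# Enumerates the 2-swap neighbourhood of a tour: every pair of distinct
# interior nodes is exchanged, and each candidate tour gets its total distance
# appended as its last element so the list can be sorted by cost.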
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = []
for n in solution[1:-1]:
snake_case_ : Optional[int] = solution.index(__UpperCamelCase )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(__UpperCamelCase )
if n == kn:
continue
snake_case_ : List[Any] = copy.deepcopy(__UpperCamelCase )
snake_case_ : Union[str, Any] = kn
snake_case_ : Dict = n
snake_case_ : List[str] = 0
for k in _tmp[:-1]:
snake_case_ : Any = _tmp[_tmp.index(__UpperCamelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(__UpperCamelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[int] = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[Any] = 1
snake_case_ : int = first_solution
snake_case_ : Tuple = []
snake_case_ : Tuple = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : Dict = find_neighborhood(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = 0
snake_case_ : int = neighborhood[index_of_best_solution]
snake_case_ : int = len(__UpperCamelCase ) - 1
snake_case_ : Any = False
while not found:
snake_case_ : Dict = 0
while i < len(__UpperCamelCase ):
if best_solution[i] != solution[i]:
snake_case_ : Tuple = best_solution[i]
snake_case_ : Any = solution[i]
break
snake_case_ : Tuple = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Optional[int] = True
snake_case_ : Union[str, Any] = best_solution[:-1]
snake_case_ : Any = neighborhood[index_of_best_solution][best_cost_index]
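                # Update the incumbent best tour whenever the accepted
                # (non-tabu) move improves on the best cost found so far.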
if cost < best_cost:
snake_case_ : List[str] = cost
snake_case_ : str = solution
else:
snake_case_ : Any = index_of_best_solution + 1
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
if len(__UpperCamelCase ) >= size:
tabu_list.pop(0 )
snake_case_ : Optional[Any] = count + 1
return best_solution_ever, best_cost
def __lowerCAmelCase ( __UpperCamelCase : List[str]=None ):
'''simple docstring'''
snake_case_ : str = generate_neighbours(args.File )
snake_case_ , snake_case_ : str = generate_first_solution(
args.File , __UpperCamelCase )
snake_case_ , snake_case_ : Tuple = tabu_search(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , args.Iterations , args.Size , )
print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
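# Session-scoped fixtures shared across the test suite: each one materialises
# a small dataset (or a compressed/archived copy of it) in a pytest temp
# directory and returns the path.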
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
import bza
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
with lza.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
    import bz2
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
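# Hedged sketch: a hypothetical round-trip check for the parquet fixture above.
# Assumes `pq` is pyarrow.parquet, consistent with the writer used above; the
# helper name is an illustration only.
def _read_parquet_columns_for_illustration(parquet_path):
    import pyarrow.parquet as pq

    table = pq.read_table(parquet_path)
    # to_pydict() returns {"col_1": [...], "col_2": [...], "col_3": [...]},
    # i.e. the same column layout as DATA_DICT_OF_LISTS above.
    return table.to_pydict()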
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
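# Hedged sketch: the fixture above deliberately mixes visible and hidden entries.
# A hypothetical helper (name and behavior are illustrative assumptions) showing
# how a loader might collect only the non-hidden .txt files, i.e. skip ".test.txt"
# and everything under ".subdir".
def _visible_txt_files_for_illustration(data_dir):
    import os

    visible = []
    for root, dirs, files in os.walk(str(data_dir)):
        dirs[:] = [d for d in dirs if not d.startswith(".")]  # prune hidden dirs
        visible.extend(
            os.path.join(root, name)
            for name in files
            if name.endswith(".txt") and not name.startswith(".")
        )
    return sorted(visible)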
| 21 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
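# Hedged worked example for the XOR above (the function name is obfuscated here):
# for inputs 25 and 32, bin() gives 0b11001 and 0b100000; zfill pads the shorter
# operand and differing bits are set:
#     011001
#   ^ 100000
#   = 111001   -> the function returns "0b111001" (decimal 57, which equals 25 ^ 32).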
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=False ):
'''simple docstring'''
try:
snake_case_ : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
snake_case_ : Optional[Any] = default
else:
# KEY is set, convert it to True or False.
try:
snake_case_ : Optional[Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
__lowerCAmelCase : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
__lowerCAmelCase : int = parse_flag_from_env('''RUN_REMOTE''', default=False)
__lowerCAmelCase : List[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
__lowerCAmelCase : Optional[Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__lowerCAmelCase : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__lowerCAmelCase : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__lowerCAmelCase : List[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__lowerCAmelCase : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__lowerCAmelCase : int = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__lowerCAmelCase : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__lowerCAmelCase : Union[str, Any] = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
snake_case_ : int = unittest.skip("""test requires faiss""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
snake_case_ : Dict = unittest.skip("""test requires regex""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
snake_case_ : str = unittest.skip("""test requires elasticsearch""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
snake_case_ : Tuple = unittest.skip("""test requires sqlalchemy""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
snake_case_ : Any = unittest.skip("""test requires PyTorch""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
if not config.TF_AVAILABLE:
snake_case_ : Tuple = unittest.skip("""test requires TensorFlow""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
snake_case_ : Optional[Any] = unittest.skip("""test requires JAX""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
snake_case_ : Optional[Any] = unittest.skip("""test requires Pillow""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__UpperCamelCase )
else:
return test_case
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__UpperCamelCase )
else:
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__UpperCamelCase )
else:
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
def _require_spacy_model(__UpperCamelCase : Optional[Any] ):
try:
import spacy # noqa F401
spacy.load(__UpperCamelCase )
except ImportError:
return unittest.skip("""test requires spacy""" )(__UpperCamelCase )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__UpperCamelCase ) )(__UpperCamelCase )
else:
return test_case
return _require_spacy_model
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__UpperCamelCase )
else:
return test_case
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__UpperCamelCase )
else:
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
snake_case_ : Any = unittest.skip("""test is slow""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
snake_case_ : Optional[int] = unittest.skip("""test is local""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
snake_case_ : Union[str, Any] = unittest.skip("""test is packaged""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
snake_case_ : List[str] = unittest.skip("""test requires remote""" )(__UpperCamelCase )
return test_case
def __lowerCAmelCase ( *__UpperCamelCase : List[str] ):
'''simple docstring'''
def decorate(cls : Union[str, Any] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCamelCase ) and name.startswith("""test""" ):
for decorator in decorators:
snake_case_ : Optional[int] = decorator(__UpperCamelCase )
setattr(cls , __UpperCamelCase , __UpperCamelCase )
return cls
return decorate
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 2
@contextmanager
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCamelCase : Optional[int]=1E-1_6 ):
'''simple docstring'''
snake_case_ : Dict = requests.Session().request
def timeout_request(__UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Any , **__UpperCamelCase : List[Any] ):
# Change the url to an invalid url so that the connection hangs
snake_case_ : Optional[int] = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
snake_case_ : Optional[Any] = timeout
try:
return online_request(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
snake_case_ : Union[str, Any] = url
snake_case_ : Dict = e.args[0]
snake_case_ : int = (max_retry_error.args[0].replace("""10.255.255.1""" , F'OfflineMock[{url}]' ),)
snake_case_ : Union[str, Any] = (max_retry_error,)
raise
def raise_connection_error(__UpperCamelCase : List[str] , __UpperCamelCase : List[str] , **__UpperCamelCase : int ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=__UpperCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def __lowerCAmelCase ( *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCamelCase , **__UpperCamelCase ) as tmp_dir:
try:
os.chdir(__UpperCamelCase )
yield
finally:
os.chdir(__UpperCamelCase )
@contextmanager
def __lowerCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
snake_case_ : Optional[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowerCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
snake_case_ : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return deepcopy(__UpperCamelCase ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(__UpperCamelCase ).integers(0 , 1_0_0 , 1_0 ).tolist()
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCamelCase : List[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : int ):
try:
return func(*__UpperCamelCase , **__UpperCamelCase )
except HTTPError as err:
if str(__UpperCamelCase ).startswith("""500""" ) or str(__UpperCamelCase ).startswith("""502""" ):
pytest.xfail(str(__UpperCamelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCamelCase )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = returncode
snake_case_ : Dict = stdout
snake_case_ : Union[str, Any] = stderr
async def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ):
'''simple docstring'''
while True:
snake_case_ : int = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Dict=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : int=None , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(__UpperCamelCase ) )
snake_case_ : Union[str, Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
snake_case_ : Union[str, Any] = []
snake_case_ : Dict = []
def tee(__UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : List[str]="" ):
snake_case_ : Any = line.decode("""utf-8""" ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label="""stderr:""" ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : str=1_8_0 , __UpperCamelCase : Any=False , __UpperCamelCase : List[str]=True ):
'''simple docstring'''
snake_case_ : Union[str, Any] = asyncio.get_event_loop()
snake_case_ : List[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
snake_case_ : int = """ """.join(__UpperCamelCase )
if result.returncode > 0:
snake_case_ : Tuple = """\n""".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
return result
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
snake_case_ : List[Any] = re.sub(r"""^gw""" , """""" , __UpperCamelCase , 0 , re.M )
return int(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = 2_9_5_0_0
snake_case_ : Optional[Any] = pytest_xdist_worker_id()
return port + uniq_delta
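# Hedged worked example for the two xdist helpers above (their names are obfuscated;
# only the arithmetic is asserted): PYTEST_XDIST_WORKER="gw3" -> worker id 3 -> port 29503.
def _xdist_port_for_illustration(worker_env="gw3", base_port=29500):
    import re

    worker_id = int(re.sub(r"^gw", "", worker_env))
    return base_port + worker_id  # 29503 for "gw3"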
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# TODO Update this
__lowerCAmelCase : List[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''esm'''
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_0_2_6 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase="absolute" , _lowercase=True , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , mask_token_id=_lowercase , **_lowercase )
snake_case_ : List[str] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Any = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Optional[Any] = position_embedding_type
snake_case_ : str = use_cache
snake_case_ : Any = emb_layer_norm_before
snake_case_ : Optional[int] = token_dropout
snake_case_ : Union[str, Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
snake_case_ : Tuple = EsmFoldConfig()
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[Any] = EsmFoldConfig(**_lowercase )
snake_case_ : Tuple = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
snake_case_ : Union[str, Any] = get_default_vocab_list()
else:
snake_case_ : List[str] = vocab_list
else:
snake_case_ : Any = None
snake_case_ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , _lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , _lowercase ):
snake_case_ : Any = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = 128
_lowerCamelCase = None
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
if self.trunk is None:
snake_case_ : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , _lowercase ):
snake_case_ : Optional[Any] = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = asdict(self )
snake_case_ : List[Any] = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 48
_lowerCamelCase = 1_024
_lowerCamelCase = 128
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = False
_lowerCamelCase = 4
_lowerCamelCase = 128
_lowerCamelCase = None
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if self.structure_module is None:
snake_case_ : List[str] = StructureModuleConfig()
elif isinstance(self.structure_module , _lowercase ):
snake_case_ : Union[str, Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
snake_case_ : List[Any] = self.sequence_state_dim // self.sequence_head_width
snake_case_ : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = asdict(self )
snake_case_ : int = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 384
_lowerCamelCase = 128
_lowerCamelCase = 16
_lowerCamelCase = 128
_lowerCamelCase = 12
_lowerCamelCase = 4
_lowerCamelCase = 8
_lowerCamelCase = 0.1
_lowerCamelCase = 8
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 7
_lowerCamelCase = 10
_lowerCamelCase = 1E-8
_lowerCamelCase = 1E5
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return asdict(self )
def __lowerCAmelCase ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 21 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
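# Up to notation, the two variants above compute
#   gelu(x)     = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
#   gelu_new(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
# where gelu_new is the tanh approximation of the exact Gaussian-CDF form.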
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
        return tf.keras.activations.gelu(__UpperCamelCase , approximate=True )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
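# Hedged usage sketch: a minimal demo of the ACT2FN dispatch defined above. It uses
# only names from this file (ACTaFN, tf); the helper name is an illustration.
def _activation_demo_for_illustration():
    act = ACTaFN["quick_gelu"]  # elementwise x * sigmoid(1.702 * x)
    return act(tf.constant([-1.0, 0.0, 1.0]))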
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        snake_case_ : Optional[Any] = Transformer2DModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
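# Hedged shape sketch for the reordering above, v2.0+ branch: a fused QKV weight of
# shape [num_heads * 3 * head_dim, hidden] is viewed as (num_heads, 3, head_dim, hidden),
# the first two axes are swapped, and it is flattened back to
# [3 * num_heads * head_dim, hidden] -- all of Q, then all of K, then all of V, which
# is the layout transformers' GPT-2 attention expects (for the bias there is simply
# no trailing hidden axis).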
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
            # Insert the 1 x 1 x n_positions x n_positions lower-triangular causal mask (stored as "bias").
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
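# Worked example of the renaming above (illustrative): the Megatron key
# "layers.0.self_attention.dense.weight" matches layer_re with groups
# ("0", "self_attention.dense", "weight"), maps through megatron_to_transformers
# to ".attn.c_proj.", and is stored, after a transpose, as
# "transformer.h.0.attn.c_proj.weight" in the output state dict.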
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
__lowerCAmelCase : Optional[int] = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
__lowerCAmelCase : Optional[Any] = {
'''ctrl''': 256,
}
__lowerCAmelCase : str = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = set()
snake_case_ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : int = char
snake_case_ : Optional[Any] = set(__UpperCamelCase )
return pairs
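# Illustrative example: for the symbol tuple ("h", "e", "l", "l", "o") the function
# above returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the candidate
# bigrams that the BPE loop below ranks against the merge table in self.bpe_ranks.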
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = CONTROL_CODES
def __init__( self , _lowercase , _lowercase , _lowercase="<unk>" , **_lowercase ) -> Any:
'''simple docstring'''
super().__init__(unk_token=_lowercase , **_lowercase )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : List[str] = json.load(_lowercase )
snake_case_ : List[Any] = {v: k for k, v in self.encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
snake_case_ : List[str] = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ : Any = [tuple(merge.split() ) for merge in merges]
snake_case_ : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Union[str, Any] = {}
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case_ : Optional[int] = tuple(_lowercase )
snake_case_ : Any = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
snake_case_ : str = get_pairs(_lowercase )
if not pairs:
return token
while True:
snake_case_ : List[Any] = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : str = bigram
snake_case_ : Optional[Any] = []
snake_case_ : List[Any] = 0
while i < len(_lowercase ):
try:
snake_case_ : str = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : Any = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : List[Any] = tuple(_lowercase )
snake_case_ : Optional[int] = new_word
if len(_lowercase ) == 1:
break
else:
snake_case_ : Optional[Any] = get_pairs(_lowercase )
snake_case_ : Any = """@@ """.join(_lowercase )
snake_case_ : Optional[Any] = word[:-4]
snake_case_ : List[Any] = word
return word
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : List[Any] = re.findall(R"""\S+\n?""" , _lowercase )
for token in words:
split_tokens.extend(list(self.bpe(_lowercase ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
return self.decoder.get(_lowercase , self.unk_token )
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = """ """.join(_lowercase ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : Any = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Dict = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
snake_case_ : Union[str, Any] = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
snake_case_ : Dict = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
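# Illustrative note on the "@@" convention used by the bpe() method above: a word is
# first turned into symbols with a trailing "</w>", merges are applied, and the
# surviving symbols are joined with "@@ " before the 4-character "</w>" suffix is
# stripped. A word split as ("hel", "lo</w>") therefore serializes to "hel@@ lo",
# which the string-conversion method above undoes by deleting every "@@ ".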
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
    snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )  # sqrt(2 / pi)
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
    snake_case_ : str = tf.cast(1.702 , x.dtype )  # scaling constant of the sigmoid approximation of GELU
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
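# Minimal sketch of the GLU above (hypothetical helper, not in the original file):
# the input is split in half along the chosen axis, and the second half, passed
# through a sigmoid, gates the first, so a (batch, 2 * d) input yields a (batch, d) output.
def _demo_glu():
    x = tf.constant([[1.0, 2.0, 3.0, 4.0]])  # shape (1, 4)
    a, b = tf.split(x, 2, axis=-1)           # two (1, 2) halves
    return a * tf.math.sigmoid(b)            # shape (1, 2)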
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
        return tf.keras.activations.gelu(__UpperCamelCase , approximate=True )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
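# Illustrative usage of the lookup above (function and table names as in the unmasked
# original): get_tf_activation("gelu") returns the callable registered in the mapping,
# while an unrecognized string raises the KeyError listing the supported activations.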
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
    # break up n - 1 into a power of 2 (s) and a
    # remaining odd component (d);
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
            # see the referenced article's analysis for the role of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the loop above never found a passing residue,
        # so n MUST be composite
return False
return True
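# Worked example (computed by hand, for illustration): for n = 221 = 13 * 17,
# n - 1 = 220 = 2**2 * 55, so d = 55 and s = 2. Testing base 2:
#   2**55  % 221 = 128  (r = 0: neither 1 nor n - 1)
#   2**110 % 221 = 30   (r = 1: still not n - 1)
# so base 2 witnesses that 221 is composite and the function returns False.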
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
    snake_case_ : int = [0] * len(__UpperCamelCase )  # in-degree of every vertex
    snake_case_ : List[str] = []  # processing queue for vertices whose in-degree has dropped to 0
    snake_case_ : Any = [1] * len(__UpperCamelCase )  # longest path (counted in vertices) ending at each vertex
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
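# Illustrative trace for the graph below: vertices 0 and 1 start with in-degree 0,
# and the longest chain 0 -> 2 -> 5 -> 6 -> 7 touches five vertices, so the call
# prints 5 (distances are counted in vertices, not edges).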
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if b == 0:
return (1, 0)
((snake_case_) , (snake_case_)) : Union[str, Any] = extended_euclid(__UpperCamelCase , a % b )
snake_case_ : Optional[int] = a // b
return (y, x - k * y)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
((snake_case_) , (snake_case_)) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = na * na
snake_case_ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
((snake_case_) , (snake_case_)) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
if b < 0:
snake_case_ : Dict = (b % n + n) % n
return b
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = invert_modulo(__UpperCamelCase , __UpperCamelCase ), invert_modulo(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = na * na
snake_case_ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1  # holds the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
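# Worked example (illustrative): for node_count = 3, catalan_number(3) =
# binomial_coefficient(6, 3) // 4 = 20 // 4 = 5 binary search trees, and
# binary_tree_count(3) = 5 * 3! = 30 binary trees.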
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase : Dict = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = BartTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , _lowercase=True , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase , **_lowercase , )
snake_case_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowercase ) != add_prefix_space:
snake_case_ : str = getattr(_lowercase , pre_tok_state.pop("""type""" ) )
snake_case_ : str = add_prefix_space
snake_case_ : Any = pre_tok_class(**_lowercase )
snake_case_ : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ : Dict = """post_processor"""
snake_case_ : List[Any] = getattr(self.backend_tokenizer , _lowercase , _lowercase )
if tokenizer_component_instance:
snake_case_ : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : Dict = tuple(state["""sep"""] )
if "cls" in state:
snake_case_ : Union[str, Any] = tuple(state["""cls"""] )
snake_case_ : List[Any] = False
if state.get("""add_prefix_space""" , _lowercase ) != add_prefix_space:
snake_case_ : Union[str, Any] = add_prefix_space
snake_case_ : List[Any] = True
if state.get("""trim_offsets""" , _lowercase ) != trim_offsets:
snake_case_ : List[Any] = trim_offsets
snake_case_ : Dict = True
if changes_to_apply:
snake_case_ : Tuple = getattr(_lowercase , state.pop("""type""" ) )
snake_case_ : Any = component_class(**_lowercase )
setattr(self.backend_tokenizer , _lowercase , _lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else value
snake_case_ : str = value
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Tuple = kwargs.get("""is_split_into_words""" , _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Tuple = kwargs.get("""is_split_into_words""" , _lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : str = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Tuple = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
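# Illustrative example of the special-token layout produced above (standard BART
# convention): a single sequence A becomes `<s> A </s>`, and a pair (A, B) becomes
# `<s> A </s> </s> B </s>`; the token-type method returns all zeros in both cases.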
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = BlenderbotConfig
_lowerCamelCase = {}
_lowerCamelCase = '''gelu'''
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=False , _lowercase=9_9 , _lowercase=3_2 , _lowercase=2 , _lowercase=4 , _lowercase=3_7 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=2_0 , _lowercase=2 , _lowercase=1 , _lowercase=0 , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Any = batch_size
snake_case_ : Tuple = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : str = use_labels
snake_case_ : Tuple = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = eos_token_id
snake_case_ : Dict = pad_token_id
snake_case_ : int = bos_token_id
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : List[str] = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = TFBlenderbotModel(config=_lowercase ).get_decoder()
snake_case_ : Optional[int] = inputs_dict["""input_ids"""]
snake_case_ : Optional[Any] = input_ids[:1, :]
snake_case_ : List[Any] = inputs_dict["""attention_mask"""][:1, :]
snake_case_ : Dict = inputs_dict["""head_mask"""]
snake_case_ : Tuple = 1
# first forward pass
snake_case_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , head_mask=_lowercase , use_cache=_lowercase )
snake_case_ , snake_case_ : List[str] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
snake_case_ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
snake_case_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ : Dict = model(_lowercase , attention_mask=_lowercase )[0]
snake_case_ : str = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-3 )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
snake_case_ : Tuple = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ : Tuple = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_lowerCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = TFBlenderbotModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ['''My friends are cool but they eat too many carbs.''']
_lowerCamelCase = '''facebook/blenderbot-400M-distill'''
@cached_property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.tokenizer(self.src_text , return_tensors="""tf""" )
snake_case_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids , )
snake_case_ : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowercase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 1 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase : int = TypeVar('''T''')
__lowerCAmelCase : Optional[int] = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase : List[Any] = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase : Dict = Union[str, bytes, os.PathLike]
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : List[str] = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCAmelCase : Optional[int] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__lowerCAmelCase : Dict = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def remove_articles(__UpperCamelCase : str ):
snake_case_ : Tuple = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__UpperCamelCase , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase : Any ):
snake_case_ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
return int(normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = [any(compute_exact(__UpperCamelCase , __UpperCamelCase ) for ref in refs ) for pred, refs in zip(__UpperCamelCase , __UpperCamelCase )]
return (sum(__UpperCamelCase ) / len(__UpperCamelCase )) * 1_0_0
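# Illustrative example of the normalization pipeline above: normalize_answer("The Cat!")
# lowercases, strips punctuation and articles, and collapses whitespace, yielding "cat";
# hence compute_exact("the cat", "Cat!") == 1 and compute_em(["the cat"], [["Cat!"]]) == 100.0.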
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = [rgram for rgrams in rgramslist for rgram in rgrams]
snake_case_ : List[str] = Counter(__UpperCamelCase )
snake_case_ : str = Counter(__UpperCamelCase )
snake_case_ : str = Counter()
for sgram, scount in sgramcounter.items():
snake_case_ : Optional[int] = scount * numref
snake_case_ : int = Counter(__UpperCamelCase )
snake_case_ : Tuple = Counter()
for cgram, ccount in cgramcounter.items():
snake_case_ : str = ccount * numref
# KEEP
snake_case_ : Optional[Any] = sgramcounter_rep & cgramcounter_rep
snake_case_ : Dict = keepgramcounter_rep & rgramcounter
snake_case_ : Optional[Any] = sgramcounter_rep & rgramcounter
snake_case_ : Optional[int] = 0
snake_case_ : Tuple = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : Optional[int] = 1
snake_case_ : Tuple = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : int = keeptmpscorea / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
snake_case_ : List[str] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
snake_case_ : Tuple = 0
if keepscore_precision > 0 or keepscore_recall > 0:
snake_case_ : Union[str, Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
snake_case_ : Union[str, Any] = sgramcounter_rep - cgramcounter_rep
snake_case_ : Optional[Any] = delgramcounter_rep - rgramcounter
snake_case_ : Tuple = sgramcounter_rep - rgramcounter
snake_case_ : Optional[Any] = 0
snake_case_ : Dict = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : str = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : Tuple = deltmpscorea / len(__UpperCamelCase )
# ADDITION
snake_case_ : Any = set(__UpperCamelCase ) - set(__UpperCamelCase )
snake_case_ : Tuple = set(__UpperCamelCase ) & set(__UpperCamelCase )
snake_case_ : Union[str, Any] = set(__UpperCamelCase ) - set(__UpperCamelCase )
snake_case_ : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : Tuple = 1
snake_case_ : Optional[Any] = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : Dict = addtmpscore / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
snake_case_ : Tuple = addtmpscore / len(__UpperCamelCase )
snake_case_ : int = 0
if addscore_precision > 0 or addscore_recall > 0:
snake_case_ : Optional[int] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[str] = len(__UpperCamelCase )
snake_case_ : Union[str, Any] = ssent.split(""" """ )
snake_case_ : Optional[Any] = csent.split(""" """ )
snake_case_ : int = []
snake_case_ : int = []
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
snake_case_ : str = []
snake_case_ : Union[str, Any] = []
snake_case_ : Dict = []
snake_case_ : List[str] = []
snake_case_ : int = []
snake_case_ : Dict = []
for rsent in rsents:
snake_case_ : Any = rsent.split(""" """ )
snake_case_ : List[str] = []
snake_case_ : Tuple = []
snake_case_ : Optional[Any] = []
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Optional[int] = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Optional[Any] = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : List[str] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Optional[Any] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Union[str, Any] = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : Any = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Optional[int] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(__UpperCamelCase )
((snake_case_) , (snake_case_) , (snake_case_)) : Tuple = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((snake_case_) , (snake_case_) , (snake_case_)) : Any = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((snake_case_) , (snake_case_) , (snake_case_)) : Any = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((snake_case_) , (snake_case_) , (snake_case_)) : str = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
snake_case_ : Tuple = sum([delascore, delascore, delascore, delascore] ) / 4
snake_case_ : Any = sum([addascore, addascore, addascore, addascore] ) / 4
snake_case_ : Tuple = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
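# Summary of SARIsent above (illustrative): keep/delete/add scores are computed for
# 1- to 4-grams, each score family is averaged over the four n-gram orders, and the
# final SARI value is the mean of the three averages.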
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : bool = True , __UpperCamelCase : str = "13a" , __UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
snake_case_ : Optional[int] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
snake_case_ : str = sacrebleu.metrics.bleu._get_tokenizer(__UpperCamelCase )()(__UpperCamelCase )
else:
snake_case_ : Optional[int] = sacrebleu.TOKENIZERS[tokenizer]()(__UpperCamelCase )
elif tokenizer == "moses":
snake_case_ : int = sacremoses.MosesTokenizer().tokenize(__UpperCamelCase , return_str=__UpperCamelCase , escape=__UpperCamelCase )
elif tokenizer == "penn":
snake_case_ : Tuple = sacremoses.MosesTokenizer().penn_tokenize(__UpperCamelCase , return_str=__UpperCamelCase )
else:
snake_case_ : str = sentence
if not return_str:
snake_case_ : List[Any] = normalized_sent.split()
return normalized_sent
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
if not (len(__UpperCamelCase ) == len(__UpperCamelCase ) == len(__UpperCamelCase )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
snake_case_ : Dict = 0
for src, pred, refs in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
sari_score += SARIsent(normalize(__UpperCamelCase ) , normalize(__UpperCamelCase ) , [normalize(__UpperCamelCase ) for sent in refs] )
snake_case_ : Dict = sari_score / len(__UpperCamelCase )
return 1_0_0 * sari_score
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]="exp" , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : str=False , __UpperCamelCase : int=False , ):
'''simple docstring'''
snake_case_ : str = len(references[0] )
if any(len(__UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(__UpperCamelCase )]
snake_case_ : List[Any] = sacrebleu.corpus_bleu(
__UpperCamelCase , __UpperCamelCase , smooth_method=__UpperCamelCase , smooth_value=__UpperCamelCase , force=__UpperCamelCase , lowercase=__UpperCamelCase , use_effective_order=__UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
result.update({"""sari""": compute_sari(sources=_lowercase , predictions=_lowercase , references=_lowercase )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=_lowercase , references=_lowercase )} )
result.update({"""exact""": compute_em(predictions=_lowercase , references=_lowercase )} )
return result
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated with each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
 - \'matthews_correlation\': Matthews correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
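# Worked example for the accuracy + F1 pairing above (binary average): with
# preds = [0, 1, 1] and labels = [0, 1, 0], accuracy is 2/3, precision is 1/2,
# recall is 1, and so F1 = 2 * (0.5 * 1.0) / (0.5 + 1.0) = 2/3.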
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''autoformer'''
_lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , _lowercase = None , _lowercase = None , _lowercase = "student_t" , _lowercase = "nll" , _lowercase = 1 , _lowercase = [1, 2, 3, 4, 5, 6, 7] , _lowercase = True , _lowercase = 0 , _lowercase = 0 , _lowercase = 0 , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = 6_4 , _lowercase = 2 , _lowercase = 2 , _lowercase = 2 , _lowercase = 2 , _lowercase = 3_2 , _lowercase = 3_2 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 1_0_0 , _lowercase = 0.02 , _lowercase = True , _lowercase=True , _lowercase = 1_0 , _lowercase = 2_5 , _lowercase = 3 , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = prediction_length
snake_case_ : Tuple = context_length if context_length is not None else prediction_length
snake_case_ : Optional[int] = distribution_output
snake_case_ : List[str] = loss
snake_case_ : Dict = input_size
snake_case_ : str = num_time_features
snake_case_ : Union[str, Any] = lags_sequence
snake_case_ : Union[str, Any] = scaling
snake_case_ : str = num_dynamic_real_features
snake_case_ : List[Any] = num_static_real_features
snake_case_ : str = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ : Optional[Any] = cardinality
else:
snake_case_ : Tuple = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ : Optional[int] = embedding_dimension
else:
snake_case_ : str = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ : str = num_parallel_samples
# Transformer architecture configuration
snake_case_ : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ : Any = d_model
snake_case_ : Optional[Any] = encoder_attention_heads
snake_case_ : Optional[int] = decoder_attention_heads
snake_case_ : List[Any] = encoder_ffn_dim
snake_case_ : Optional[int] = decoder_ffn_dim
snake_case_ : Any = encoder_layers
snake_case_ : Any = decoder_layers
snake_case_ : Union[str, Any] = dropout
snake_case_ : List[Any] = attention_dropout
snake_case_ : Dict = activation_dropout
snake_case_ : str = encoder_layerdrop
snake_case_ : Any = decoder_layerdrop
snake_case_ : Tuple = activation_function
snake_case_ : Optional[Any] = init_std
snake_case_ : Optional[int] = use_cache
# Autoformer
snake_case_ : List[Any] = label_length
snake_case_ : Union[str, Any] = moving_average
snake_case_ : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
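# A minimal instantiation sketch (this configuration class is exported as
# `AutoformerConfig` in transformers; the argument values are illustrative):
#
#     from transformers import AutoformerConfig
#     config = AutoformerConfig(prediction_length=24, context_length=48,
#                               num_static_categorical_features=1, cardinality=[5])
#
# The encoder/decoder input width is then input_size * len(lags_sequence) plus the
# feature count computed by the property above (embeddings, time features, loc/scale).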
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__lowerCAmelCase : Dict = logging.getLogger(__name__)
__lowerCAmelCase : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__lowerCAmelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE__ )} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_lowerCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowerCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_lowerCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
if self.train_file is not None:
snake_case_ : Dict = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
snake_case_ : Any = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : Any = [json.loads(__UpperCamelCase ) for line in f.read().splitlines() if (len(__UpperCamelCase ) > 0 and not line.isspace())]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
snake_case_ : str = {c: dataset[c] for c in dataset.column_names}
snake_case_ : Dict = refs
return Dataset.from_dict(__UpperCamelCase )
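# Sketch of the ref file consumed above: one JSON-encoded list per dataset example,
# listing the sub-token indices that continue a whole (Chinese) word, e.g. the line
#
#     [2, 3, 7]
#
# The list ends up in the dataset column read by DataCollatorForWholeWordMask (named
# "chinese_ref" in the original script), and the assert enforces exactly one ref line
# per example.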
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case_ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case_ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
snake_case_ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
snake_case_ : List[str] = {}
if data_args.train_file is not None:
snake_case_ : Dict = data_args.train_file
if data_args.validation_file is not None:
snake_case_ : Any = data_args.validation_file
snake_case_ : Tuple = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
snake_case_ : Optional[int] = """text"""
snake_case_ : List[str] = load_dataset(__UpperCamelCase , data_files=__UpperCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ : Union[str, Any] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
snake_case_ : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
snake_case_ : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
snake_case_ : Any = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
snake_case_ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
snake_case_ : Dict = AutoModelForMaskedLM.from_config(__UpperCamelCase )
model.resize_token_embeddings(len(__UpperCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
snake_case_ : Optional[int] = datasets["""train"""].column_names
else:
snake_case_ : List[str] = datasets["""validation"""].column_names
snake_case_ : List[str] = """text""" if """text""" in column_names else column_names[0]
snake_case_ : Optional[Any] = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(__UpperCamelCase : List[Any] ):
# Remove empty lines
snake_case_ : Optional[Any] = [line for line in examples["""text"""] if len(__UpperCamelCase ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=data_args.max_seq_length )
snake_case_ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
snake_case_ : Union[str, Any] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
snake_case_ : Optional[Any] = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the trainer from dropping the reference column
snake_case_ : List[str] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
snake_case_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case_ : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=__UpperCamelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case_ : Optional[int] = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case_ : Any = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case_ : List[str] = model_args.model_name_or_path
else:
snake_case_ : Optional[int] = None
snake_case_ : Any = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case_ : List[str] = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
snake_case_ : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case_ : str = trainer.evaluate()
snake_case_ : Tuple = math.exp(eval_output["""eval_loss"""] )
snake_case_ : int = perplexity
snake_case_ : str = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
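# A hedged launch sketch (flag names follow the dataclass fields used above; the
# script filename and all paths are placeholders):
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file train.txt --train_ref_file train_ref.txt \
#         --do_train --do_eval --output_dir ./out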
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
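# Note on the example inputs handed to ipex.optimize above: (2, 4, 64, 64) matches the
# UNet latent shape of a 512x512 Stable Diffusion v1 image under classifier-free
# guidance (batch of 2), and (2, 77, 768) matches CLIP text embeddings of 77 tokens;
# adjust these shapes if your fine-tuned model differs.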
| 21 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''input_features''', '''is_longer''']
def __init__( self , _lowercase=6_4 , _lowercase=4_8_0_0_0 , _lowercase=4_8_0 , _lowercase=1_0 , _lowercase=1_0_2_4 , _lowercase=0.0 , _lowercase=False , _lowercase = 0 , _lowercase = 1_4_0_0_0 , _lowercase = None , _lowercase = "fusion" , _lowercase = "repeatpad" , **_lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
snake_case_ : Optional[Any] = top_db
snake_case_ : int = truncation
snake_case_ : Tuple = padding
snake_case_ : Any = fft_window_size
snake_case_ : Optional[int] = (fft_window_size >> 1) + 1
snake_case_ : Dict = hop_length
snake_case_ : List[str] = max_length_s
snake_case_ : str = max_length_s * sampling_rate
snake_case_ : str = sampling_rate
snake_case_ : Dict = frequency_min
snake_case_ : Optional[Any] = frequency_max
snake_case_ : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm=_lowercase , mel_scale="""htk""" , )
snake_case_ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = spectrogram(
_lowercase , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowercase , log_mel="""dB""" , )
return log_mel_spectrogram.T
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ : Optional[int] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ : Any = [0]
# randomly choose index for each part
snake_case_ : List[str] = np.random.choice(ranges[0] )
snake_case_ : List[str] = np.random.choice(ranges[1] )
snake_case_ : Union[str, Any] = np.random.choice(ranges[2] )
snake_case_ : Optional[Any] = mel[idx_front : idx_front + chunk_frames, :]
snake_case_ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
snake_case_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
snake_case_ : Optional[Any] = torch.tensor(mel[None, None, :] )
snake_case_ : Tuple = torch.nn.functional.interpolate(
_lowercase , size=[chunk_frames, 6_4] , mode="""bilinear""" , align_corners=_lowercase )
snake_case_ : Optional[Any] = mel_shrink[0][0].numpy()
snake_case_ : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
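    # The fusion above follows the CLAP recipe: three chunks sampled from the front,
    # middle and back thirds of the log-mel spectrogram plus a bilinearly shrunk
    # global view are stacked into a 4-channel input, so long clips keep both local
    # detail and global context.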
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case_ : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case_ : List[str] = len(_lowercase ) - max_length
snake_case_ : Union[str, Any] = np.random.randint(0 , overflow + 1 )
snake_case_ : Tuple = waveform[idx : idx + max_length]
snake_case_ : Optional[Any] = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case_ : Optional[Any] = self._np_extract_fbank_features(_lowercase , self.mel_filters )
snake_case_ : Optional[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
snake_case_ : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case_ : List[str] = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case_ : int = False
else:
snake_case_ : List[str] = self._random_mel_fusion(_lowercase , _lowercase , _lowercase )
snake_case_ : List[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
snake_case_ : Optional[Any] = False
        # "repeat" tiles the audio up to max_length; "repeatpad" tiles it and then zero-pads to max_length
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case_ : str = int(max_length / len(_lowercase ) )
snake_case_ : Union[str, Any] = np.stack(np.tile(_lowercase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case_ : List[str] = int(max_length / len(_lowercase ) )
snake_case_ : Tuple = np.stack(np.tile(_lowercase , _lowercase ) )
snake_case_ : int = np.pad(_lowercase , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
snake_case_ : Dict = self._np_extract_fbank_features(_lowercase , self.mel_filters )
snake_case_ : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case_ : int = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
snake_case_ : Union[str, Any] = truncation if truncation is not None else self.truncation
snake_case_ : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case_ : Optional[Any] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
snake_case_ : Optional[int] = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ : List[str] = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
snake_case_ : str = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Optional[int] = [np.asarray(_lowercase )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case_ : Dict = [
self._get_input_mel(_lowercase , max_length if max_length else self.nb_max_samples , _lowercase , _lowercase )
for waveform in raw_speech
]
snake_case_ : int = []
snake_case_ : str = []
for mel, longer in padded_inputs:
input_mel.append(_lowercase )
is_longer.append(_lowercase )
if truncation == "fusion" and sum(_lowercase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case_ : Dict = np.random.randint(0 , len(_lowercase ) )
snake_case_ : List[str] = True
if isinstance(input_mel[0] , _lowercase ):
snake_case_ : int = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case_ : Dict = [[longer] for longer in is_longer]
snake_case_ : str = {"""input_features""": input_mel, """is_longer""": is_longer}
snake_case_ : Tuple = BatchFeature(_lowercase )
if return_tensors is not None:
snake_case_ : Any = input_features.convert_to_tensors(_lowercase )
return input_features
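# A minimal usage sketch (this extractor matches `ClapFeatureExtractor` in
# transformers; that name and the exact output shapes are assumptions based on the
# defaults above):
#
#     import numpy as np
#     extractor = ClapFeatureExtractor()
#     audio = np.random.randn(48_000).astype(np.float32)      # 1 s at 48 kHz
#     feats = extractor(audio, sampling_rate=48_000, return_tensors="np")
#     # feats["input_features"]: roughly (1, 4, 1001, 64) with truncation="fusion"
#     # feats["is_longer"]:      a per-clip flag marking inputs longer than max_length_s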
| 21 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCAmelCase ( __UpperCamelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def __lowerCAmelCase ( __UpperCamelCase : np.ndarray , __UpperCamelCase : np.ndarray ):
'''simple docstring'''
snake_case_ : Dict = XGBClassifier()
classifier.fit(__UpperCamelCase , __UpperCamelCase )
return classifier
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = load_iris()
snake_case_ , snake_case_ : Optional[Any] = data_handling(__UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = train_test_split(
__UpperCamelCase , __UpperCamelCase , test_size=0.25 )
snake_case_ : List[str] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
snake_case_ : List[str] = xgboost(__UpperCamelCase , __UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , display_labels=__UpperCamelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
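# Worked decomposition for the loop above: for n = 561 (the smallest Carmichael
# number), n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4; the witnesses 2, 3, 5, ...
# then expose 561 = 3 * 11 * 17 as composite, which a plain Fermat test would miss.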
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
    # upper limit of the deterministic test; above it, only the probabilistic test (allow_probable=True) applies
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__lowerCAmelCase : Optional[Any] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCAmelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: this is only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCAmelCase : List[Any] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = spearmanr(_lowercase , _lowercase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
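# A compact, self-contained variant of the circle-hit estimator above
# (hits inside the unit circle / total samples ~= pi / 4); the name
# `estimate_pi` is illustrative.
def estimate_pi(num_samples: int) -> float:
    inside = sum(
        1
        for _ in range(num_samples)
        if uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
    )
    return 4 * inside / num_samples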
| 21 | 1 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects
# vertices of the same set.
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : List[str] = [False] * len(__UpperCamelCase )
snake_case_ : Optional[int] = [-1] * len(__UpperCamelCase )
def dfs(__UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
snake_case_ : Dict = True
snake_case_ : int = c
for u in graph[v]:
if not visited[u]:
dfs(__UpperCamelCase , 1 - c )
for i in range(len(__UpperCamelCase ) ):
if not visited[i]:
dfs(__UpperCamelCase , 0 )
for i in range(len(__UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__lowerCAmelCase : Optional[Any] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
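# An equivalent iterative two-coloring sketch using BFS instead of the
# recursive DFS above; `is_bipartite_bfs` is an illustrative name, not part
# of the file.
from collections import deque

def is_bipartite_bfs(graph: dict) -> bool:
    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            node = queue.popleft()
            for neighbour in graph[node]:
                if neighbour not in color:
                    color[neighbour] = 1 - color[node]
                    queue.append(neighbour)
                elif color[neighbour] == color[node]:
                    return False  # adjacent vertices share a color
    return True

print(is_bipartite_bfs(graph))  # reuses the adjacency list defined above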
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
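# A standalone sketch of the scoring math above: both embedding sets are
# L2-normalized, so the matrix product yields cosine similarities, and a
# per-concept threshold (plus the adjustment margin) flags matches. The
# shapes and the 0.5 threshold here are illustrative, not trained weights.
if __name__ == "__main__":
    image_embeds = torch.randn(2, 768)     # hypothetical projected image embeddings
    concept_embeds = torch.randn(17, 768)  # hypothetical concept embeddings
    scores = cosine_distance(image_embeds, concept_embeds)  # shape (2, 17)
    has_match = torch.any(scores - 0.5 > 0, dim=1)          # one flag per image
    print(has_match)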
| 21 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__lowerCAmelCase : Dict = {'''allegro/herbert-base-cased''': 514}
__lowerCAmelCase : Tuple = {}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = HerbertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase="</s>" , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sep_token=_lowercase , **_lowercase , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.cls_token_id]
snake_case_ : Dict = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
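# A standalone sketch of the layouts produced by the methods above: a pair of
# sequences becomes <s> A </s> B </s>, with segment ids 0 for the first part
# and 1 for the second. The ids are illustrative placeholders.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2
    seq_a, seq_b = [10, 11, 12], [20, 21]
    input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
    token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
    assert len(input_ids) == len(token_type_ids) == 8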
| 21 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
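# A standalone sketch of the depth lookup used in the conversion above; the
# "cvt-13"/"cvt-21"/"cvt-w24" name parsing mirrors the branches in the
# conversion function, and the helper name `depths_for` is illustrative.
def depths_for(cvt_model: str) -> list:
    size_tag = cvt_model.rsplit("/", 1)[-1][4:6]
    return {"13": [1, 2, 10], "21": [1, 4, 16]}.get(size_tag, [2, 2, 20])

assert depths_for("cvt-13") == [1, 2, 10]
assert depths_for("microsoft/cvt-21") == [1, 4, 16]
assert depths_for("cvt-w24") == [2, 2, 20]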
| 21 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__lowerCAmelCase : List[str] = 50_0000
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = os.path.split(__file__)
__lowerCAmelCase : Dict = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def __lowerCAmelCase ( __UpperCamelCase : datasets.Dataset , **__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = dataset.map(**__UpperCamelCase )
@get_duration
def __lowerCAmelCase ( __UpperCamelCase : datasets.Dataset , **__UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = dataset.filter(**__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : str = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
snake_case_ : Tuple = generate_example_dataset(
os.path.join(__UpperCamelCase , """dataset.arrow""" ) , __UpperCamelCase , num_examples=__UpperCamelCase )
snake_case_ : Dict = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__UpperCamelCase )
def tokenize(__UpperCamelCase : Optional[int] ):
return tokenizer(examples["""text"""] )
snake_case_ : Optional[int] = map(__UpperCamelCase )
snake_case_ : int = map(__UpperCamelCase , batched=__UpperCamelCase )
snake_case_ : int = map(__UpperCamelCase , function=lambda __UpperCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="""numpy""" ):
snake_case_ : List[str] = map(__UpperCamelCase , function=lambda __UpperCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="""pandas""" ):
snake_case_ : str = map(__UpperCamelCase , function=lambda __UpperCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
snake_case_ : str = map(__UpperCamelCase , function=lambda __UpperCamelCase : None , batched=__UpperCamelCase )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
snake_case_ : int = map(__UpperCamelCase , function=lambda __UpperCamelCase : None , batched=__UpperCamelCase )
snake_case_ : Tuple = map(__UpperCamelCase , function=__UpperCamelCase , batched=__UpperCamelCase )
snake_case_ : Union[str, Any] = filter(__UpperCamelCase )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__UpperCamelCase , """wb""" ) as f:
f.write(json.dumps(__UpperCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
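# A minimal sketch of what the `get_duration` decorator imported from the
# local `utils` module above might look like (assumption: it returns the
# wrapped call's elapsed wall-clock seconds).
import time
from functools import wraps

def get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper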
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
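# Worked example of the hidden-size rule noted in __init__ above, using the
# default config values: four stages double the embedding dim three times.
if __name__ == "__main__":
    embed_dim, depths = 96, [2, 2, 6, 2]
    assert int(embed_dim * 2 ** (len(depths) - 1)) == 768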
| 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
import bza
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
with lza.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
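# Sanity-check sketch: the dict-of-lists constant is just the column-wise
# view of the row-wise DATA list defined above.
assert {key: [row[key] for row in DATA] for key in DATA[0]} == DATA_DICT_OF_LISTS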
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
import bza
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
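# Hedged usage sketch (added, not in the original conftest): how a test could
# consume one of the session-scoped archive fixtures above. The fixture name
# `zip_csv_path` is assumed; every fixture name in this dump is obfuscated.
import zipfile
def test_zip_contains_two_csv_members(zip_csv_path):
    with zipfile.ZipFile(zip_csv_path) as archive:
        names = archive.namelist()
        assert len(names) == 2
        assert all(name.endswith(".csv") for name in names)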
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0 ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or n < 0:
raise ValueError("""Invalid input""" )
snake_case_ : Dict = 1_0**n
snake_case_ : List[Any] = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , __UpperCamelCase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
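# Note (added): the heavy lifting above is Python's three-argument pow, which
# performs modular exponentiation so the ~2.4-million-digit power of two is
# never materialised; only the last n digits are kept at every step.
# 8739992577 is the widely published value of these last ten digits.
assert (28433 * pow(2, 7_830_457, 10**10) + 1) % 10**10 == 8739992577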
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be non-negative""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
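# Illustration (added): with its original, de-obfuscated name `binary_xor`,
# the helper above behaves as follows (values chosen for illustration):
#   binary_xor(25, 32) -> '0b111001'   # 011001 XOR 100000
#   binary_xor(37, 50) -> '0b010111'   # 100101 XOR 110010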
| 21 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = (PNDMScheduler,)
_lowerCamelCase = (('''num_inference_steps''', 50),)
def UpperCAmelCase__ ( self , **_lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_lowercase )
return config
def UpperCAmelCase__ ( self , _lowercase=0 , **_lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = dict(self.forward_default_kwargs )
snake_case_ : Any = kwargs.pop("""num_inference_steps""" , _lowercase )
snake_case_ : int = self.dummy_sample
snake_case_ : Optional[Any] = 0.1 * sample
snake_case_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ : Union[str, Any] = self.get_scheduler_config(**_lowercase )
snake_case_ : Optional[Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
snake_case_ : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
snake_case_ : Optional[Any] = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
snake_case_ : List[Any] = dummy_past_residuals[:]
snake_case_ : int = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Optional[int] = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case_ : str = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Union[str, Any] = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self , _lowercase=0 , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = dict(self.forward_default_kwargs )
snake_case_ : Dict = kwargs.pop("""num_inference_steps""" , _lowercase )
snake_case_ : List[str] = self.dummy_sample
snake_case_ : Union[str, Any] = 0.1 * sample
snake_case_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ : str = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
snake_case_ : Union[str, Any] = scheduler_class.from_pretrained(_lowercase )
            # set timesteps on the reloaded scheduler before restoring residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ : Optional[Any] = dummy_past_residuals[:]
snake_case_ : List[str] = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Tuple = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case_ : Optional[int] = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Optional[int] = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , **_lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : int = self.get_scheduler_config(**_lowercase )
snake_case_ : Optional[Any] = scheduler_class(**_lowercase )
snake_case_ : Tuple = 1_0
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case_ : str = model(_lowercase , _lowercase )
snake_case_ : Optional[Any] = scheduler.step_prk(_lowercase , _lowercase , _lowercase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case_ : Any = model(_lowercase , _lowercase )
snake_case_ : Dict = scheduler.step_plms(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = dict(self.forward_default_kwargs )
snake_case_ : Tuple = kwargs.pop("""num_inference_steps""" , _lowercase )
for scheduler_class in self.scheduler_classes:
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**_lowercase )
snake_case_ : Dict = self.dummy_sample
snake_case_ : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , """set_timesteps""" ):
scheduler.set_timesteps(_lowercase )
elif num_inference_steps is not None and not hasattr(_lowercase , """set_timesteps""" ):
snake_case_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case_ : Tuple = dummy_past_residuals[:]
snake_case_ : Dict = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase ).prev_sample
snake_case_ : List[str] = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case_ : str = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase ).prev_sample
snake_case_ : str = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase )
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : Tuple = self.get_scheduler_config(steps_offset=1 )
snake_case_ : Any = scheduler_class(**_lowercase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 2_7
for scheduler_class in self.scheduler_classes:
snake_case_ : Dict = self.dummy_sample
snake_case_ : Optional[Any] = 0.1 * sample
snake_case_ : Tuple = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case_ : Union[str, Any] = scheduler.step_prk(_lowercase , _lowercase , _lowercase ).prev_sample
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
with self.assertRaises(_lowercase ):
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : List[Any] = self.get_scheduler_config()
snake_case_ : Dict = scheduler_class(**_lowercase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.full_loop()
snake_case_ : Optional[int] = torch.sum(torch.abs(_lowercase ) )
snake_case_ : str = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = self.full_loop(prediction_type="""v_prediction""" )
snake_case_ : Tuple = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Any = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 )
snake_case_ : Any = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Optional[Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 )
snake_case_ : Dict = torch.sum(torch.abs(_lowercase ) )
snake_case_ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
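# Minimal sketch (added) of the save/load round trip that check_over_configs
# exercises above, written against diffusers' public API:
import tempfile
from diffusers import PNDMScheduler
scheduler = PNDMScheduler(num_train_timesteps=1_000)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = PNDMScheduler.from_pretrained(tmpdirname)
assert reloaded.config.num_train_timesteps == 1_000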
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
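# Illustration (added): with its original name `reverse_words`, the one-liner
# above gives reverse_words("I love Python") -> 'Python love I'.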
| 21 | 1 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__lowerCAmelCase : List[str] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : int = []
__lowerCAmelCase : Optional[int] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
__lowerCAmelCase : str = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'''emoji''': True,
},
}
]
__lowerCAmelCase : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
__lowerCAmelCase : Dict = 0
with open(log, '''r''') as f:
for line in f:
__lowerCAmelCase : Dict = json.loads(line)
if line.get('''nodeid''', '''''') != "":
__lowerCAmelCase : Optional[int] = line['''nodeid''']
if line.get('''duration''', None) is not None:
__lowerCAmelCase : int = F'''{line["duration"]:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__lowerCAmelCase : Dict = []
log.unlink()
__lowerCAmelCase : Union[str, Any] = ''''''
__lowerCAmelCase : Union[str, Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Optional[int] = {}
for test in failed_tests:
__lowerCAmelCase : Tuple = test[0].split('''::''')
__lowerCAmelCase : Any = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
__lowerCAmelCase : Union[str, Any] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__lowerCAmelCase : str = [test[0] for test in failed_table]
__lowerCAmelCase : Optional[int] = list(set(files))
# Count number of instances in failed_tests
__lowerCAmelCase : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__lowerCAmelCase : List[Any] = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__lowerCAmelCase : List[str] = '''Too many failed tests, please see the full report in the Action results.'''
__lowerCAmelCase : List[Any] = len(err) + 10
__lowerCAmelCase : int = message[: 3000 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
__lowerCAmelCase : Optional[Any] = '''No failed tests! 🤗'''
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
__lowerCAmelCase : List[str] = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
__lowerCAmelCase : List[str] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
__lowerCAmelCase : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
__lowerCAmelCase : List[str] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__lowerCAmelCase : Optional[Any] = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
__lowerCAmelCase : Union[str, Any] = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__lowerCAmelCase : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__lowerCAmelCase : List[str] = row[0]
else:
__lowerCAmelCase : Optional[Any] = ''''''
__lowerCAmelCase : Union[str, Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
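# Hedged sketch (added): the custom TableFormat above renders close to
# tabulate's built-in "github" style; a standalone approximation of the
# per-file failure table, with made-up file names:
from tabulate import tabulate
rows = [["tests/test_big_modeling.py", 3], ["tests/test_utils.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt="github", stralign="right"))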
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
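# Sketch (added): instantiating the config with the defaults above. The public
# class name `NatConfig` is assumed from the `model_type = "nat"` registration.
from transformers import NatConfig
config = NatConfig()
# hidden_size is derived as embed_dim * 2 ** (len(depths) - 1) = 64 * 2**3
assert config.hidden_size == 512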
| 21 | 1 |
"""simple docstring"""
import requests
__lowerCAmelCase : Optional[int] = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Union[str, Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
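# Note (added): every sub-test above seeds a torch.Generator so the two
# pipeline calls it compares are deterministic; the same pattern in isolation:
import torch
g_a = torch.Generator(device="cpu").manual_seed(0)
g_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(3, generator=g_a), torch.randn(3, generator=g_b))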
| 21 | 1 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__lowerCAmelCase : Optional[Any] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__lowerCAmelCase : Dict = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__lowerCAmelCase : int = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(_lowercase , _lowercase , sample_weight=_lowercase ) ),
}
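# Equivalent direct call (added): the metric is a thin wrapper over
# scikit-learn, so Example 1 from the docstring above reduces to:
from sklearn.metrics import matthews_corrcoef
print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54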
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
    # Extract the directory that contains the checkpoint.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
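# Self-contained illustration (added) of the checkpoint_version >= 2.0 branch
# of fix_query_key_value_ordering: rows stored as [heads * splits * head_dim, :]
# are regrouped to [splits * heads * head_dim, :] without changing the shape.
import torch
num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)
out = (
    param.view(num_heads, num_splits, head_dim, cols)
    .transpose(0, 1)
    .contiguous()
    .view(num_heads * num_splits * head_dim, cols)
)
assert out.shape == param.shape  # same shape, rows now grouped by Q/K/V split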
| 21 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({'''labels''': ClassLabel} )
_lowerCamelCase = "text"
_lowerCamelCase = "labels"
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _lowercase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
snake_case_ : List[str] = copy.deepcopy(self )
snake_case_ : int = self.label_schema.copy()
snake_case_ : int = features[self.label_column]
snake_case_ : List[str] = label_schema
return task_template
@property
def UpperCAmelCase__ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
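# Sketch (added, API names assumed from the datasets task-template module):
# the method above is align_with_features, which copies the dataset's
# ClassLabel into the template's label schema.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification
task = TextClassification(text_column="text", label_column="labels")
features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
aligned = task.align_with_features(features)
assert aligned.label_schema["labels"].names == ["neg", "pos"]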
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
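# Numeric cross-check (added), independent of TensorFlow: the tanh
# approximation used by _gelu_new stays within ~1e-3 of the exact erf-based
# GELU computed by _gelu.
import math
def gelu_exact(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
def gelu_tanh(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044_715 * x**3)))
for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_exact(x) - gelu_tanh(x)) < 1e-3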
| 21 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
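# Sketch (added) of what shift_tokens_right does to build decoder inputs above:
# prepend decoder_start_token_id and drop the final position (the real helper
# also maps label -100s to the pad id, omitted here).
import numpy as np
ids = np.array([[5, 6, 7, 1]])
decoder_start_token_id = 0
shifted = np.concatenate([np.full((1, 1), decoder_start_token_id, dtype=ids.dtype), ids[:, :-1]], axis=-1)
print(shifted)  # [[0 5 6 7]]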
| 21 |
"""simple docstring"""
def longest_distance(graph):
    """Prints the number of vertices on the longest path in a DAG (Kahn-style pass)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # start from all vertices with no incoming edge
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            # relax the longest-path estimate along this edge
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
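# Editor's sketch: a variant of the same pass that returns the answer instead
# of printing it (assumption: identical semantics, easier to test). For the
# graph above it returns 5, e.g. the path 0 -> 2 -> 5 -> 6 -> 7.
def longest_distance_value(dag):
    indegree = {v: 0 for v in dag}
    for targets in dag.values():
        for t in targets:
            indegree[t] += 1
    order = [v for v in dag if indegree[v] == 0]
    dist = {v: 1 for v in dag}
    for v in order:  # appending while iterating is safe for Python lists
        for t in dag[v]:
            indegree[t] -= 1
            dist[t] = max(dist[t], dist[v] + 1)
            if indegree[t] == 0:
                order.append(t)
    return max(dist.values())


assert longest_distance_value(graph) == 5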
| 21 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
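# Editor's usage sketch (hypothetical paths, flags as declared above):
#   python convert_s2t_checkpoint.py \
#       --fairseq_path ./s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted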
| 21 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Returns the n-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labelled binary trees: catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
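# Editor's note: the pattern above keeps `import transformers.models.bloom`
# cheap; the heavy torch/tokenizers imports only happen when an attribute is
# first accessed. A stripped-down sketch of the same idea (hypothetical, not
# the real _LazyModule implementation):
# import importlib, types
# class _Lazy(types.ModuleType):
#     def __init__(self, name, structure):
#         super().__init__(name)
#         self._owner = {attr: mod for mod, attrs in structure.items() for attr in attrs}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._owner[attr], self.__name__)
#         return getattr(module, attr)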
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
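# Editor's note (sketch): with the defaults above (embed_dim=96 and four
# stages) the derived hidden_size is 96 * 2 ** 3 == 768, i.e. the channel
# width after the last Swin stage. Assumes the restored SwinConfig name:
# SwinConfig().hidden_size  # -> 768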
| 21 | 1 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """Encodes bytes as an uppercase hexadecimal (Base16) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decodes an uppercase Base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
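# Editor's round-trip sketch (assumes the restored names above):
def _base16_roundtrip_example() -> None:
    encoded = base16_encode(b"Hello World!")
    assert encoded == "48656C6C6F20576F726C6421"
    assert base16_decode(encoded) == b"Hello World!"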
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 21 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
'''simple docstring'''
snake_case_ : Dict = """sshleifer/tiny-gpt2"""
snake_case_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowercase , multi_process=_lowercase , )
snake_case_ : Dict = TensorFlowBenchmark(_lowercase )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain(self):
'''simple docstring'''
snake_case_ : Optional[Any] = """sgugger/tiny-distilbert-classification"""
snake_case_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_lowercase )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_graph(self):
'''simple docstring'''
snake_case_ : Optional[Any] = """sshleifer/tiny-gpt2"""
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
snake_case_ : int = TensorFlowBenchmark(_lowercase )
snake_case_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager(self):
'''simple docstring'''
snake_case_ : Union[str, Any] = """sshleifer/tiny-gpt2"""
snake_case_ : str = AutoConfig.from_pretrained(_lowercase )
snake_case_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowercase , multi_process=_lowercase , )
snake_case_ : List[Any] = TensorFlowBenchmark(_lowercase , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_graph(self):
'''simple docstring'''
snake_case_ : Optional[Any] = """sshleifer/tiny-gpt2"""
snake_case_ : Optional[int] = AutoConfig.from_pretrained(_lowercase )
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
snake_case_ : List[str] = TensorFlowBenchmark(_lowercase , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self):
'''simple docstring'''
snake_case_ : Any = """sshleifer/tiny-gpt2"""
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
snake_case_ : List[Any] = TensorFlowBenchmark(_lowercase )
snake_case_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs(self):
'''simple docstring'''
snake_case_ : Optional[int] = """sshleifer/tiny-gpt2"""
snake_case_ : List[Any] = AutoConfig.from_pretrained(_lowercase )
snake_case_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_lowercase , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs(self):
'''simple docstring'''
snake_case_ : Dict = """patrickvonplaten/t5-tiny-random"""
snake_case_ : int = AutoConfig.from_pretrained(_lowercase )
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
snake_case_ : Tuple = TensorFlowBenchmark(_lowercase , configs=[config] )
snake_case_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def test_inference_no_configs_xla(self):
'''simple docstring'''
snake_case_ : Dict = """sshleifer/tiny-gpt2"""
snake_case_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_lowercase , multi_process=_lowercase , )
snake_case_ : int = TensorFlowBenchmark(_lowercase )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_save_csv_files(self):
'''simple docstring'''
snake_case_ : str = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(_lowercase , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(_lowercase , """env.csv""" ) , multi_process=_lowercase , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , """env.csv""" ) ).exists() )
    def test_trace_memory(self):
'''simple docstring'''
snake_case_ : Dict = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowercase ):
self.assertTrue(hasattr(_lowercase , """sequential""" ) )
self.assertTrue(hasattr(_lowercase , """cumulative""" ) )
self.assertTrue(hasattr(_lowercase , """current""" ) )
self.assertTrue(hasattr(_lowercase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , """log.txt""" ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , eager_mode=_lowercase , multi_process=_lowercase , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_lowercase )
snake_case_ : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowercase , """log.txt""" ) ).exists() )
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
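# Editor's usage sketch for the most common re-export (standard API; model
# name is only an example):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# collator = DataCollatorWithPadding(tokenizer)
# batch = collator([{"input_ids": [101, 102]}, {"input_ids": [101, 2023, 102]}])
# batch["input_ids"].shape  # -> torch.Size([2, 3]) after padding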
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes exact match, per-question macro F1 and answer-level F1 for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider(self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
'''simple docstring'''
snake_case_ : Optional[int] = ort.SessionOptions()
snake_case_ : List[str] = False
return options
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = """A red cat sitting on a park bench"""
snake_case_ : Optional[Any] = np.random.RandomState(0 )
snake_case_ : Tuple = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images
snake_case_ : Any = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ : Dict = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """A red cat sitting on a park bench"""
snake_case_ : List[Any] = np.random.RandomState(0 )
snake_case_ : List[Any] = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images
snake_case_ : int = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Optional[int] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
    def test_inference_no_head(self):
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
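# Editor's sketch for a quick latency check of the optimized pipeline
# (assumption: same pipe object; numbers depend entirely on hardware):
# import time
# tic = time.time()
# with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#     pipe(prompt, num_inference_steps=20)
# print(f"bf16 latency: {time.time() - tic:.1f}s")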
| 21 | 1 |
"""simple docstring"""
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validates a Spanish DNI: 8 digits plus the checksum letter LOOKUP_LETTERS[number % 23]."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
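# Editor's worked example (sketch, assumes the restored names above):
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the classic sample
# DNI validates, while a wrong checksum letter does not:
# is_spain_national_id("12345678Z")  # -> True
# is_spain_national_id("12345678A")  # -> False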
| 21 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # overrides of common tests that do not apply to RoFormer's rjieba tokenizer
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 21 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
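# Editor's usage sketch (hypothetical data_dir; otherwise the API shown above):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
# len(train_dataset), train_dataset[0]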
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
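    # the 1 entries are placeholders where the bound repeats the previous value:
    # `n < 1` is never true, so those extra bases add no new deterministic range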
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
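    # e.g. for n = 561: n - 1 = 560 = 2**4 * 35, giving s = 4 and d = 35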
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
            # see the article for the analysis that explains m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, the loop above never evaluated to true,
        # so n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
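    # (unit circle area = pi * 1**2 = pi, enclosing 2 x 2 square area = 4)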
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
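    # Monte Carlo integration: the mean of the function over uniform samples,
    # scaled by the interval width, estimates the definite integral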
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
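    # y = sqrt(4 - x**2) traces a quarter circle of radius 2, so the exact area
    # on [0, 2] is (pi * 2**2) / 4 = pi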
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__lowerCAmelCase : Tuple = parser.parse_args()
__lowerCAmelCase : Optional[int] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__lowerCAmelCase : Dict = CLIPImageProcessor()
__lowerCAmelCase : List[str] = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__lowerCAmelCase : Tuple = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
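    # with both embedding sets L2-normalised, the matrix product below yields
    # pairwise cosine similarities (despite the "distance" in the function name)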
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
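        # flagged images receive a small positive adjustment on every concept
        # score, which effectively lowers their nsfw threshold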
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''longformer'''
def __init__( self , _lowercase = 5_1_2 , _lowercase = 2 , _lowercase = 1 , _lowercase = 0 , _lowercase = 2 , _lowercase = 3_0_5_2_2 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 3_0_7_2 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 5_1_2 , _lowercase = 2 , _lowercase = 0.02 , _lowercase = 1E-12 , _lowercase = False , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = attention_window
snake_case_ : int = sep_token_id
snake_case_ : Dict = bos_token_id
snake_case_ : List[Any] = eos_token_id
snake_case_ : List[Any] = vocab_size
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : List[str] = hidden_act
snake_case_ : str = intermediate_size
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : Optional[int] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : Tuple = onnx_export
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = "default" , _lowercase = None ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase , _lowercase )
snake_case_ : List[Any] = True
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : List[Any] = super().outputs
if self.task == "default":
snake_case_ : Optional[int] = {0: """batch"""}
return outputs
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
snake_case_ : Union[str, Any] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
snake_case_ : Dict = 1
return inputs
| 21 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int=True , __UpperCamelCase : Union[str, Any]="pt" ):
'''simple docstring'''
snake_case_ : Union[str, Any] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {}
snake_case_ : Any = padding_side
return tokenizer(
[line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Optional[int]=None , ):
'''simple docstring'''
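    # keep a column if at least one row holds a non-pad token; fully padded
    # trailing columns are trimmed from the batch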
snake_case_ : List[Any] = input_ids.ne(__UpperCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase="train" , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="" , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : List[str] = Path(_lowercase ).joinpath(type_path + """.source""" )
snake_case_ : Dict = Path(_lowercase ).joinpath(type_path + """.target""" )
snake_case_ : Optional[int] = self.get_char_lens(self.src_file )
snake_case_ : str = max_source_length
snake_case_ : List[Any] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
snake_case_ : List[Any] = tokenizer
snake_case_ : Dict = prefix
if n_obs is not None:
snake_case_ : Dict = self.src_lens[:n_obs]
snake_case_ : List[Any] = src_lang
snake_case_ : Optional[int] = tgt_lang
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , _lowercase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case_ : Optional[Any] = index + 1 # linecache starts at 1
snake_case_ : Optional[int] = self.prefix + linecache.getline(str(self.src_file ) , _lowercase ).rstrip("""\n""" )
snake_case_ : Union[str, Any] = linecache.getline(str(self.tgt_file ) , _lowercase ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ : Optional[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
)
snake_case_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
snake_case_ : str = encode_line(_lowercase , _lowercase , self.max_source_length , """right""" )
snake_case_ : Dict = encode_line(_lowercase , _lowercase , self.max_target_length , """right""" )
snake_case_ : Optional[int] = source_inputs["""input_ids"""].squeeze()
snake_case_ : Optional[Any] = target_inputs["""input_ids"""].squeeze()
snake_case_ : Optional[int] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> List[str]:
'''simple docstring'''
return [len(_lowercase ) for x in Path(_lowercase ).open().readlines()]
def UpperCAmelCase__ ( self , _lowercase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case_ : List[Any] = torch.stack([x["""input_ids"""] for x in batch] )
snake_case_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
snake_case_ : Optional[Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
snake_case_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ : Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ : Union[str, Any] = trim_batch(_lowercase , _lowercase )
snake_case_ , snake_case_ : Dict = trim_batch(_lowercase , _lowercase , attention_mask=_lowercase )
snake_case_ : Union[str, Any] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__lowerCAmelCase : List[Any] = getLogger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Any = get_git_info()
save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any]=4 , **__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with open(__UpperCamelCase ) as f:
return json.load(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = git.Repo(search_parent_directories=__UpperCamelCase )
snake_case_ : Dict = {
"""repo_id""": str(__UpperCamelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def __lowerCAmelCase ( __UpperCamelCase : Callable , __UpperCamelCase : Iterable ):
'''simple docstring'''
return list(map(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
with open(__UpperCamelCase , """wb""" ) as f:
return pickle.dump(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
def remove_articles(__UpperCamelCase : List[str] ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase : Optional[int] ):
snake_case_ : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[int] = normalize_answer(__UpperCamelCase ).split()
snake_case_ : Optional[int] = normalize_answer(__UpperCamelCase ).split()
snake_case_ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase )
snake_case_ : str = sum(common.values() )
if num_same == 0:
return 0
snake_case_ : Optional[Any] = 1.0 * num_same / len(__UpperCamelCase )
snake_case_ : Tuple = 1.0 * num_same / len(__UpperCamelCase )
snake_case_ : Tuple = (2 * precision * recall) / (precision + recall)
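    # harmonic mean of precision and recall, i.e. the token-level F1 score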
return fa
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ):
'''simple docstring'''
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
snake_case_ : Optional[Any] = 0
for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ):
em += exact_match_score(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
em /= len(__UpperCamelCase )
return {"em": em}
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param; they use `dropout_rate` instead
snake_case_ : Optional[int] = """dropout_rate"""
for p in extra_params:
if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) )
delattr(__UpperCamelCase , __UpperCamelCase )
continue
snake_case_ : int = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p]
setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
delattr(__UpperCamelCase , __UpperCamelCase )
return hparams, config
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__lowerCAmelCase : Optional[int] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ):
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case_ : Dict = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
snake_case_ : Optional[Any] = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
snake_case_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
snake_case_ : Any = value
elif weight_type == "weight_g":
snake_case_ : Optional[int] = value
elif weight_type == "weight_v":
snake_case_ : Union[str, Any] = value
elif weight_type == "bias":
snake_case_ : Optional[int] = value
else:
snake_case_ : List[Any] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Dict = fairseq_model.state_dict()
snake_case_ : Any = hf_model.feature_extractor
    # if the encoder dim differs from the decoder dim -> use proj_weight
snake_case_ : Tuple = None
for name, value in fairseq_dict.items():
snake_case_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Dict = True
elif name.split(""".""" )[0] == "proj":
snake_case_ : List[Any] = fairseq_model.proj
snake_case_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Union[str, Any] = True
if "*" in mapped_key:
snake_case_ : Union[str, Any] = name.split(__UpperCamelCase )[0].split(""".""" )[-2]
snake_case_ : Tuple = mapped_key.replace("""*""" , __UpperCamelCase )
if "weight_g" in name:
snake_case_ : Dict = """weight_g"""
elif "weight_v" in name:
snake_case_ : List[str] = """weight_v"""
elif "bias" in name:
snake_case_ : List[str] = """bias"""
elif "weight" in name:
snake_case_ : List[str] = """weight"""
else:
snake_case_ : Tuple = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = full_name.split("""conv_layers.""" )[-1]
snake_case_ : str = name.split(""".""" )
snake_case_ : Any = int(items[0] )
snake_case_ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ : Tuple = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ : Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ , snake_case_ : Dict = emb.weight.shape
snake_case_ : List[Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case_ : Dict = emb.weight.data
return lin_layer
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : Tuple = f.readlines()
snake_case_ : Optional[Any] = [line.split(""" """ )[0] for line in lines]
snake_case_ : Optional[int] = len(__UpperCamelCase )
snake_case_ : Union[str, Any] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
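    # regular vocabulary entries start at id 4, right after the special tokens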
vocab_dict.update(dict(zip(__UpperCamelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple , ):
'''simple docstring'''
snake_case_ : List[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase , vocab_size=__UpperCamelCase , decoder_layers=__UpperCamelCase , do_stable_layer_norm=__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
snake_case_ , snake_case_ , snake_case_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case_ : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ : Any = WavaVecaModel(__UpperCamelCase )
snake_case_ : List[str] = recursively_load_weights_wavaveca(model.encoder , __UpperCamelCase )
snake_case_ : List[str] = SpeechaTextaForCausalLM(__UpperCamelCase )
snake_case_ , snake_case_ : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case_ : int = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
snake_case_ : Any = SpeechEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase )
snake_case_ : Tuple = False
# add projection layer
snake_case_ : int = nn.Parameter(projection_layer.weight )
snake_case_ : Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case_ : Dict = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase , """vocab.json""" ) )
tokenizer.save_pretrained(__UpperCamelCase )
snake_case_ : Dict = hf_wavavec.config.to_dict()
snake_case_ : List[str] = tokenizer.pad_token_id
snake_case_ : Union[str, Any] = tokenizer.bos_token_id
snake_case_ : str = tokenizer.eos_token_id
snake_case_ : str = """speech_to_text_2"""
snake_case_ : Any = """wav2vec2"""
snake_case_ : Dict = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__lowerCAmelCase : Any = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
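# --- Illustrative sketch (not part of the original test file) ---
# The integration test above pins a 3x3 corner of the model output against
# hard-coded reference values. A minimal, self-contained version of that
# regression-test pattern (array values here are placeholders, not real
# RoFormer outputs):
import numpy as _np

def _check_output_slice(output, expected_slice, atol=1e-4):
    # Comparing only a small slice keeps the pinned reference short while
    # still catching most numerical regressions.
    actual = output[:, :3, :3]
    assert actual.shape == expected_slice.shape, "slice shapes must match"
    assert _np.allclose(actual, expected_slice, atol=atol), "output drifted from reference"

_check_output_slice(_np.zeros((1, 6, 5)), _np.zeros((1, 3, 3)))  # passes: all-zero placeholders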
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list[float] ):
'''simple docstring'''
if len(__UpperCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
snake_case_ : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
import bza
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
with lza.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
import bza
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
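# --- Illustrative sketch (not part of the original fixtures) ---
# These session-scoped fixtures materialise small files once per test run and
# return their paths; pytest injects them into tests by matching the argument
# name. A self-contained imitation of the csv fixture plus a consuming check:
import csv as _csv
import os as _os
import tempfile as _tempfile

def _make_csv(dirname):
    path = _os.path.join(dirname, "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = _csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        writer.writerow({"col_1": "0", "col_2": 0, "col_3": 0.0})
    return path

with _tempfile.TemporaryDirectory() as _tmp:
    _rows = list(_csv.DictReader(open(_make_csv(_tmp), newline="")))
    assert _rows[0]["col_1"] == "0"  # the fixture data survives the round trip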
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = [[float("""inf""" ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
snake_case_ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__UpperCamelCase ):
# looping through rows of graph array
for i in range(__UpperCamelCase ):
# looping through columns of graph array
for j in range(__UpperCamelCase ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
snake_case_ : Optional[Any] = dist[i][k] + dist[k][j]
_print_dist(__UpperCamelCase , __UpperCamelCase )
return dist, v
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter number of vertices: '''))
__lowerCAmelCase : Optional[Any] = int(input('''Enter number of edges: '''))
__lowerCAmelCase : Any = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
__lowerCAmelCase : Tuple = 0.0
# src and dst are vertex indices and must lie in the range [0, v);
# an index outside this range will raise an IndexError
for i in range(e):
print('''\nEdge ''', i + 1)
__lowerCAmelCase : List[Any] = int(input('''Enter source:'''))
__lowerCAmelCase : Tuple = int(input('''Enter destination:'''))
__lowerCAmelCase : List[Any] = float(input('''Enter weight:'''))
__lowerCAmelCase : Dict = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
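# --- Illustrative sketch (not part of the original script) ---
# The same relaxation loop, driven by a hard-coded graph instead of input()
# calls, reproducing the 3-vertex example transcribed in the comments above
# (vertices 1 and 2 connected, vertex 0 isolated):
INF = float("inf")
_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],  # Enter source:1  destination:2  weight:2
    [INF, 1.0, 0.0],  # Enter source:2  destination:1  weight:1
]
_dist = [row[:] for row in _graph]
_n = len(_dist)
for _k in range(_n):
    for _i in range(_n):
        for _j in range(_n):
            if _dist[_i][_k] + _dist[_k][_j] < _dist[_i][_j]:
                _dist[_i][_j] = _dist[_i][_k] + _dist[_k][_j]
assert _dist[1][2] == 2.0 and _dist[2][1] == 1.0  # matches the expected output matrix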
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
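# --- Illustrative sketch (not part of the original module) ---
# Worked example of the zfill-based XOR above: both operands are rendered in
# binary, padded to a common width, and XOR-ed character-wise.
_a, _b = 25, 32                       # 0b11001 and 0b100000
_a_bin = bin(_a)[2:].zfill(6)         # '011001'
_b_bin = bin(_b)[2:].zfill(6)         # '100000'
_xor = "".join(str(int(x != y)) for x, y in zip(_a_bin, _b_bin))
assert "0b" + _xor == bin(_a ^ _b)    # '0b111001' == bin(57)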
| 21 | 1 |
"""simple docstring"""
import json
import sys
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : List[str] ):
'''simple docstring'''
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
snake_case_ : Optional[int] = json.load(__UpperCamelCase )
snake_case_ : List[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(__UpperCamelCase ):
snake_case_ : int = results[benchmark_name]
snake_case_ : str = benchmark_name.split("""/""" )[-1]
output_md.append(F'### Benchmark: {benchmark_file_name}' )
snake_case_ : Any = """| metric |"""
snake_case_ : Union[str, Any] = """|--------|"""
snake_case_ : Union[str, Any] = """| new / old (diff) |"""
for metric_name in sorted(__UpperCamelCase ):
snake_case_ : Dict = benchmark_res[metric_name]
snake_case_ : Any = metric_vals["""new"""]
snake_case_ : int = metric_vals.get("""old""" , __UpperCamelCase )
snake_case_ : Optional[Any] = metric_vals.get("""diff""" , __UpperCamelCase )
snake_case_ : Any = F' {new_val:f}' if isinstance(__UpperCamelCase , (int, float) ) else """None"""
if old_val is not None:
val_str += F' / {old_val:f}' if isinstance(__UpperCamelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += F' ({dif_val:f})' if isinstance(__UpperCamelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Dict = sys.argv[1]
__lowerCAmelCase : str = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
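# --- Illustrative sketch (not part of the original script) ---
# The input JSON shape is inferred from the loop above: a mapping of benchmark
# file paths to {metric: {"new": ..., "old": ..., "diff": ...}}; "old" and
# "diff" are optional. The sample below is an assumption based on how the keys
# are accessed, and the call uses the name from the __main__ block above.
import json as _json
import os as _os
import tempfile as _tempfile

_sample = {
    "benchmarks/benchmark_train.json": {
        "time_per_step": {"new": 0.91, "old": 1.00, "diff": -0.09},
    }
}
with _tempfile.TemporaryDirectory() as _tmp:
    _in, _out = _os.path.join(_tmp, "results.json"), _os.path.join(_tmp, "results.md")
    with open(_in, "w") as _f:
        _json.dump(_sample, _f)
    format_json_to_md(_in, _out)  # produces a <details>-wrapped markdown table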
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
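# --- Illustrative sketch (not part of the original module) ---
# split() with no arguments collapses any run of whitespace, so the reversal
# also normalises spacing:
assert " ".join("hello   world  again".split()[::-1]) == "again world hello"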
| 21 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase , _lowercase = None , _lowercase = None ) -> List[str]:
'''simple docstring'''
super().__init__()
snake_case_ : int = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
snake_case_ : Union[str, Any] = torch.zeros(_lowercase , _lowercase )
else:
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = torch.nn.Parameter(_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=_lowercase , transformer=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1
# get prompt text embeddings
snake_case_ : Dict = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate text embeddings for each generation per prompt
snake_case_ : Tuple = prompt_embeds.repeat_interleave(_lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ : Any = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 )
else:
snake_case_ : Union[str, Any] = [""""""] * batch_size
snake_case_ : Tuple = text_input_ids.shape[-1]
snake_case_ : Dict = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
snake_case_ : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : List[Any] = negative_prompt_embeds.shape[1]
snake_case_ : int = negative_prompt_embeds.repeat(1 , _lowercase , 1 )
snake_case_ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 1_0_0 , _lowercase = 5.0 , _lowercase = 1.0 , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = 1
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[str] = len(_lowercase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' )
snake_case_ : List[str] = batch_size * num_images_per_prompt
snake_case_ : List[str] = guidance_scale > 1.0
snake_case_ : Tuple = self._encode_prompt(_lowercase , _lowercase , _lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_lowercase )}.' )
# get the initial completely masked latents unless the user supplied it
snake_case_ : Optional[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ : Optional[Any] = self.transformer.num_vector_embeds - 1
snake_case_ : List[str] = torch.full(_lowercase , _lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
snake_case_ : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase , device=self.device )
snake_case_ : Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ : int = latents
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ : Dict = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample
if do_classifier_free_guidance:
snake_case_ , snake_case_ : Any = model_output.chunk(2 )
snake_case_ : Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase )
snake_case_ : str = self.truncate(_lowercase , _lowercase )
# remove `log(0)`'s (`-inf`s)
snake_case_ : Union[str, Any] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : List[str] = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
snake_case_ : str = self.vqvae.config.vq_embed_dim
snake_case_ : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ : Optional[int] = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase )
snake_case_ : int = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample
snake_case_ : int = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Any = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> torch.FloatTensor:
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = torch.sort(_lowercase , 1 , descending=_lowercase )
snake_case_ : Optional[Any] = torch.exp(_lowercase )
snake_case_ : Optional[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ : Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] , _lowercase )
snake_case_ : Tuple = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ : int = keep_mask[:, :-1, :]
snake_case_ : List[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ : int = log_p_x_0.clone()
snake_case_ : Any = -torch.inf # -inf = log(0)
return rv
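# --- Illustrative sketch (not part of the original pipeline) ---
# Two numerical tricks from the class above, shown standalone: (1) classifier-
# free guidance combines the unconditional and text-conditioned predictions as
# uncond + scale * (text - uncond); (2) truncate() keeps only the classes whose
# sorted cumulative probability stays under truncation_rate and sets the rest
# to log(0) = -inf. Shapes and values here are toy assumptions.
import torch as _torch

# (1) classifier-free guidance with guidance_scale = 2.0
_uncond = _torch.tensor([0.0, 1.0])
_text = _torch.tensor([1.0, 3.0])
_guided = _uncond + 2.0 * (_text - _uncond)
assert _torch.equal(_guided, _torch.tensor([2.0, 5.0]))

# (2) truncation of a log-probability tensor (batch=1, classes=4, pixels=1)
_log_p = _torch.log(_torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))
_sorted_lp, _indices = _torch.sort(_log_p, 1, descending=True)
_keep = _sorted_lp.exp().cumsum(dim=1) < 0.6                 # rate 0.6 -> [T, F, F, F]
_all_true = _torch.full_like(_keep[:, 0:1, :], True)         # never drop the argmax
_keep = _torch.cat((_all_true, _keep), dim=1)[:, :-1, :]     # shift right -> [T, T, F, F]
_keep = _keep.gather(1, _indices.argsort(1))                 # back to original class order
_out = _log_p.clone()
_out[~_keep] = float("-inf")                                 # log(0) for truncated classes
assert _out[0, 2, 0] == float("-inf")
assert _out[0, 0, 0] == _log_p[0, 0, 0]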
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
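# --- Illustrative sketch (not part of the original config) ---
# With the defaults above (embed_dim=64 and four stages), the channel
# dimension after the last stage doubles at each stage transition:
_embed_dim, _num_stages = 64, 4
assert int(_embed_dim * 2 ** (_num_stages - 1)) == 512  # 64 -> 128 -> 256 -> 512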
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[Any] = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['''ConditionalDetrFeatureExtractor''']
__lowerCAmelCase : Optional[Any] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
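# --- Illustrative sketch (not part of the original module) ---
# _LazyModule defers the heavy submodule imports until an attribute is first
# touched. A simplified stand-in using module-level __getattr__ (PEP 562);
# this is not the transformers implementation, and it must live inside a
# package for the relative import below to resolve.
import importlib

_LAZY_ATTRS = {"ConditionalDetrConfig": ".configuration_conditional_detr"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")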
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = ['''model.decoder.embed_positions.weights''']
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if "emb" in name:
snake_case_ : int = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ : Union[str, Any] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ : List[str] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ : Any = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ : Optional[Any] = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ : Union[str, Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ : Optional[int] = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ : List[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ : List[str] = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def __lowerCAmelCase ( __UpperCamelCase : OrderedDict , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[str] = list(state_dict.keys() )
snake_case_ : int = {}
for key in keys:
snake_case_ : Optional[int] = state_dict.pop(__UpperCamelCase )
snake_case_ : int = rename_keys(__UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ : int = val[:hidden_size, :]
snake_case_ : int = val[hidden_size : 2 * hidden_size, :]
snake_case_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ : Union[str, Any] = val
else:
snake_case_ : Optional[Any] = val
return state_dict, enc_dec_proj_state_dict
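# --- Illustrative sketch (not part of the conversion script) ---
# The fused in_proj_weight is split row-wise into equal q/k/v blocks, exactly
# as in the branch above. Toy example with hidden_size=2:
import torch as _torch

_hidden = 2
_in_proj = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_q = _in_proj[:_hidden, :]
_k = _in_proj[_hidden : 2 * _hidden, :]
_v = _in_proj[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _in_proj)  # the split is lossless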
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
if checkpoint == "small":
# default config values
snake_case_ : str = 1_0_2_4
snake_case_ : List[str] = 2_4
snake_case_ : List[Any] = 1_6
elif checkpoint == "medium":
snake_case_ : List[str] = 1_5_3_6
snake_case_ : str = 4_8
snake_case_ : List[Any] = 2_4
elif checkpoint == "large":
snake_case_ : Any = 2_0_4_8
snake_case_ : Dict = 4_8
snake_case_ : str = 3_2
else:
raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
snake_case_ : Dict = MusicgenDecoderConfig(
hidden_size=__UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__UpperCamelCase , num_attention_heads=__UpperCamelCase , )
return config
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple=None , __UpperCamelCase : str=None , __UpperCamelCase : Optional[Any]="cpu" ):
'''simple docstring'''
snake_case_ : Any = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase )
snake_case_ : List[Any] = decoder_config_from_checkpoint(__UpperCamelCase )
snake_case_ : Optional[Any] = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ : int = rename_state_dict(
__UpperCamelCase , hidden_size=decoder_config.hidden_size )
snake_case_ : Union[str, Any] = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ : List[str] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ : Dict = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ : List[Any] = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' )
if len(__UpperCamelCase ) > 0:
raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
snake_case_ : Optional[int] = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
snake_case_ : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ : Any = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ : Any = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ : Dict = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ : Dict = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
snake_case_ : Union[str, Any] = 2_0_4_8
snake_case_ : Optional[int] = 2_0_4_8
# set other default generation config params
snake_case_ : Dict = int(3_0 * audio_encoder.config.frame_rate )
snake_case_ : Optional[Any] = True
snake_case_ : Any = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(F'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If, when running this conversion script, you get an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
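# A minimal, self-contained sketch (not part of the original script) of the v2.0
# layout handled above: the fused QKV weight is stored as
# [num_heads * num_splits * hidden_size, :], and moving the splits axis to the
# front makes Q, K and V contiguous along dim 0 while preserving the overall shape.
def _demo_qkv_reordering_v2():
    num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
    param = torch.randn(num_heads * num_splits * hidden_size, cols)
    reordered = (
        param.view(num_heads, num_splits, hidden_size, cols)
        .transpose(0, 1)
        .contiguous()
        .view(param.size())
    )
    assert reordered.shape == param.shape
    return reordered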
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the output matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
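# Illustrative example (not part of the conversion itself) of how the layer regex
# and the megatron_to_transformers map above translate one Megatron parameter
# name into its transformers GPT-2 counterpart.
def _demo_layer_name_mapping():
    demo_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    match = demo_re.match("layers.0.mlp.dense_h_to_4h.weight")
    layer_idx, op_name, weight_or_bias = int(match.group(1)), match.group(2), match.group(3)
    out_name = f"transformer.h.{layer_idx}" + ".mlp.c_fc." + weight_or_bias
    assert (op_name, out_name) == ("mlp.dense_h_to_4h", "transformer.h.0.mlp.c_fc.weight")
    return out_name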
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
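# Example invocation (illustrative; the checkpoint path is a placeholder):
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/checkpoint.zip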
| 21 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = tempfile.mkdtemp()
# fmt: off
snake_case_ : Optional[int] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ : Tuple = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case_ : List[str] = {"""unk_token""": """<unk>"""}
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowercase ) )
snake_case_ : Optional[Any] = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
snake_case_ : Tuple = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Any:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case_ : Optional[int] = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_rust_tokenizer()
snake_case_ : List[Any] = self.get_image_processor()
snake_case_ : Dict = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
snake_case_ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ : int = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ : List[str] = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
snake_case_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_image_processor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Dict = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : str = self.prepare_image_inputs()
snake_case_ : Union[str, Any] = image_processor(_lowercase , return_tensors="""np""" )
snake_case_ : Any = processor(images=_lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.get_image_processor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : int = """lower newer"""
snake_case_ : List[str] = processor(text=_lowercase )
snake_case_ : Optional[int] = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = self.get_image_processor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Union[str, Any] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : str = """lower newer"""
snake_case_ : Tuple = self.prepare_image_inputs()
snake_case_ : Dict = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : str = processor.batch_decode(_lowercase )
snake_case_ : Any = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_image_processor()
snake_case_ : int = self.get_tokenizer()
snake_case_ : List[Any] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case_ : Union[str, Any] = """lower newer"""
snake_case_ : Optional[Any] = self.prepare_image_inputs()
snake_case_ : Optional[Any] = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
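# The function above computes the exact GELU, gelu(x) = x * Phi(x), where Phi is
# the standard normal CDF evaluated via the error function:
# Phi(x) = 0.5 * (1 + erf(x / sqrt(2))).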
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
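# Minimal usage sketch (illustrative, assuming the activation table is exposed
# as ACTaFN, as the error message above suggests): look up an activation by
# name and apply it to a tensor.
def _demo_activation_lookup():
    act = ACTaFN["gelu"]
    return act(tf.constant([-1.0, 0.0, 1.0]))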
| 21 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''mra'''
def __init__( self , _lowercase=5_0_2_6_5 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1 , _lowercase=0.02 , _lowercase=1E-5 , _lowercase="absolute" , _lowercase=4 , _lowercase="full" , _lowercase=0 , _lowercase=0 , _lowercase=1 , _lowercase=0 , _lowercase=2 , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = initializer_range
snake_case_ : List[str] = type_vocab_size
snake_case_ : Dict = layer_norm_eps
snake_case_ : int = position_embedding_type
snake_case_ : List[str] = block_per_row
snake_case_ : Dict = approx_mode
snake_case_ : str = initial_prior_first_n_blocks
snake_case_ : Any = initial_prior_diagonal_n_blocks
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
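# For the adjacency list above, one longest path is 0 -> 2 -> 5 -> 6 -> 7, which
# visits five vertices, so with all distances initialised to 1 the call prints 5.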
| 21 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , ):
'''simple docstring'''
snake_case_ : int = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
snake_case_ , snake_case_ : List[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
snake_case_ : Optional[Any] = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
assert base_extractor.is_extractable(__UpperCamelCase )
snake_case_ : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case_ : Optional[Any] = file_path.read_text(encoding="""utf-8""" )
else:
snake_case_ : Dict = output_path.read_text(encoding="""utf-8""" )
snake_case_ : Any = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : int , ):
'''simple docstring'''
snake_case_ : int = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
snake_case_ : Tuple = input_paths[compression_format]
if input_path is None:
snake_case_ : str = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
snake_case_ : str = Extractor.infer_extractor_format(__UpperCamelCase )
assert extractor_format is not None
snake_case_ : List[Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case_ : Optional[int] = file_path.read_text(encoding="""utf-8""" )
else:
snake_case_ : str = output_path.read_text(encoding="""utf-8""" )
snake_case_ : Union[str, Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Dict ):
'''simple docstring'''
import tarfile
snake_case_ : Any = tmp_path / """data_dot_dot"""
directory.mkdir()
snake_case_ : Dict = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
import tarfile
snake_case_ : List[str] = tmp_path / """data_sym_link"""
directory.mkdir()
snake_case_ : Optional[Any] = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__UpperCamelCase )
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
snake_case_ : Dict = insecure_tar_files[insecure_tar_file]
snake_case_ : str = tmp_path / """extracted"""
TarExtractor.extract(__UpperCamelCase , __UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
snake_case_ : Optional[Any] = (
B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__UpperCamelCase )
assert zipfile.is_zipfile(str(__UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__UpperCamelCase ) # but we're right
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
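# Worked example (illustrative): for n = 3 nodes, C(3) = C(6, 3) // (3 + 1)
# = 20 // 4 = 5 binary search trees, and 5 * 3! = 30 distinct binary trees.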
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : int | None = None , __UpperCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
snake_case_ : int = 0
if end is None:
snake_case_ : int = len(__UpperCamelCase ) - 1
if start >= end:
return
snake_case_ : str = (start + end) // 2
slowsort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
slowsort(__UpperCamelCase , mid + 1 , __UpperCamelCase )
if sequence[end] < sequence[mid]:
snake_case_ , snake_case_ : str = sequence[mid], sequence[end]
slowsort(__UpperCamelCase , __UpperCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
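    # Worked example (illustrative): with the defaults above (embed_dim=96,
    # depths=[2, 2, 6, 2], i.e. four stages), hidden_size = 96 * 2 ** 3 == 768.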
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 3_2 , _lowercase = 6_4 , _lowercase = 2_0 , _lowercase = 7_6_8 , _lowercase=7_7 , _lowercase=4 , _lowercase = 0.0 , _lowercase = "silu" , _lowercase = None , _lowercase = None , _lowercase = "linear" , _lowercase = "prd" , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = num_attention_heads
snake_case_ : str = attention_head_dim
snake_case_ : Optional[Any] = num_attention_heads * attention_head_dim
snake_case_ : List[str] = additional_embeddings
snake_case_ : Any = time_embed_dim or inner_dim
snake_case_ : Any = embedding_proj_dim or embedding_dim
snake_case_ : List[str] = clip_embed_dim or embedding_dim
snake_case_ : int = Timesteps(_lowercase , _lowercase , 0 )
snake_case_ : Dict = TimestepEmbedding(_lowercase , _lowercase , out_dim=_lowercase , act_fn=_lowercase )
snake_case_ : Union[str, Any] = nn.Linear(_lowercase , _lowercase )
if embedding_proj_norm_type is None:
snake_case_ : List[str] = None
elif embedding_proj_norm_type == "layer":
snake_case_ : Dict = nn.LayerNorm(_lowercase )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase )
if encoder_hid_proj_type is None:
snake_case_ : str = None
elif encoder_hid_proj_type == "linear":
snake_case_ : List[str] = nn.Linear(_lowercase , _lowercase )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
snake_case_ : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _lowercase ) )
if added_emb_type == "prd":
snake_case_ : Dict = nn.Parameter(torch.zeros(1 , 1 , _lowercase ) )
elif added_emb_type is None:
snake_case_ : int = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
snake_case_ : Tuple = nn.ModuleList(
[
BasicTransformerBlock(
_lowercase , _lowercase , _lowercase , dropout=_lowercase , activation_fn="""gelu""" , attention_bias=_lowercase , )
for d in range(_lowercase )
] )
if norm_in_type == "layer":
snake_case_ : Optional[Any] = nn.LayerNorm(_lowercase )
elif norm_in_type is None:
snake_case_ : List[str] = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
snake_case_ : int = nn.LayerNorm(_lowercase )
snake_case_ : str = nn.Linear(_lowercase , _lowercase )
snake_case_ : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
snake_case_ : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _lowercase , persistent=_lowercase )
snake_case_ : Any = nn.Parameter(torch.zeros(1 , _lowercase ) )
snake_case_ : Dict = nn.Parameter(torch.zeros(1 , _lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def fn_recursive_add_processors(_lowercase , _lowercase , _lowercase ):
if hasattr(_lowercase , """set_processor""" ):
snake_case_ : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , _lowercase , _lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowercase , _lowercase , _lowercase )
return processors
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(_lowercase )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_lowercase , _lowercase , _lowercase ):
if hasattr(_lowercase , """set_processor""" ):
if not isinstance(_lowercase , _lowercase ):
module.set_processor(_lowercase )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , _lowercase , _lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = True , ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.shape[0]
snake_case_ : List[Any] = timestep
if not torch.is_tensor(_lowercase ):
snake_case_ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_lowercase ) and len(timesteps.shape ) == 0:
snake_case_ : str = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case_ : Optional[Any] = timesteps * torch.ones(_lowercase , dtype=timesteps.dtype , device=timesteps.device )
snake_case_ : Tuple = self.time_proj(_lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case_ : List[Any] = timesteps_projected.to(dtype=self.dtype )
snake_case_ : Optional[Any] = self.time_embedding(_lowercase )
if self.embedding_proj_norm is not None:
snake_case_ : Optional[Any] = self.embedding_proj_norm(_lowercase )
snake_case_ : List[Any] = self.embedding_proj(_lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case_ : List[Any] = self.encoder_hidden_states_proj(_lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
snake_case_ : Dict = self.proj_in(_lowercase )
snake_case_ : str = self.positional_embedding.to(hidden_states.dtype )
snake_case_ : Any = []
snake_case_ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
snake_case_ : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
snake_case_ : Dict = hidden_states[:, None, :]
snake_case_ : List[str] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case_ : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_lowercase , -1 , -1 )
additional_embeds.append(_lowercase )
snake_case_ : List[Any] = torch.cat(
_lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
snake_case_ : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case_ : List[Any] = F.pad(
_lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case_ : int = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case_ : Union[str, Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
snake_case_ : Dict = F.pad(_lowercase , (0, self.additional_embeddings) , value=0.0 )
snake_case_ : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
snake_case_ : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
snake_case_ : List[str] = self.norm_in(_lowercase )
for block in self.transformer_blocks:
snake_case_ : Optional[Any] = block(_lowercase , attention_mask=_lowercase )
snake_case_ : int = self.norm_out(_lowercase )
if self.prd_embedding is not None:
snake_case_ : Dict = hidden_states[:, -1]
else:
snake_case_ : Tuple = hidden_states[:, additional_embeddings_len:]
snake_case_ : List[Any] = self.proj_to_clip_embeddings(_lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
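# Illustrative sketch (not part of the model): the causal mask registered in
# __init__ is torch.full([n, n], -10000.0).triu_(1), i.e. zeros on and below the
# diagonal and large negative values above it, so once added to the attention
# scores each position can only attend to itself and earlier positions.
def _demo_causal_mask(n=3):
    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)
    return mask  # e.g. [[0, -1e4, -1e4], [0, 0, -1e4], [0, 0, 0]]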
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
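        # rescaling the mean token cross-entropy by the sequence length recovers the
        # summed negative log-likelihood that the reference score below is expressed in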
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_lowercase ).to(_lowercase )
snake_case_ : Any = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
snake_case_ : str = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
snake_case_ : int = model(input_ids.to(_lowercase ) , labels=labels.to(_lowercase ) ).loss
snake_case_ : Union[str, Any] = -(labels.shape[-1] * loss.item())
snake_case_ : List[Any] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=9_9 , _lowercase=1_3 , _lowercase=1_6 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=2 , _lowercase=3_2 , _lowercase=4 , _lowercase=4 , _lowercase=3_0 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = parent
snake_case_ : Dict = batch_size
snake_case_ : Optional[Any] = decoder_seq_length
# For common tests
snake_case_ : int = self.decoder_seq_length
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_attention_mask
snake_case_ : Dict = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Optional[int] = d_model
snake_case_ : Union[str, Any] = d_model
snake_case_ : Dict = decoder_layers
snake_case_ : List[Any] = decoder_layers
snake_case_ : List[str] = decoder_ffn_dim
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : Dict = decoder_attention_heads
snake_case_ : Any = eos_token_id
snake_case_ : List[Any] = bos_token_id
snake_case_ : str = pad_token_id
snake_case_ : List[Any] = decoder_start_token_id
snake_case_ : int = use_cache
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : Tuple = None
snake_case_ : List[str] = decoder_seq_length
snake_case_ : Dict = 2
snake_case_ : str = 1
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : List[Any] = None
if self.use_attention_mask:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case_ : Any = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : int = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Dict:
'''simple docstring'''
snake_case_ : Any = True
snake_case_ : Optional[Any] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
snake_case_ : Union[str, Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case_ : str = model(_lowercase , use_cache=_lowercase )
snake_case_ : Optional[Any] = model(_lowercase )
snake_case_ : List[Any] = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
snake_case_ : int = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
snake_case_ : Union[str, Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : str = model(_lowercase )["""last_hidden_state"""]
snake_case_ : List[str] = model(_lowercase , past_key_values=_lowercase )["""last_hidden_state"""]
# select random slice
snake_case_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case_ : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
_lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
_lowerCamelCase = True
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
snake_case_ : str = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    '''simple docstring'''
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 21 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers whose squared magnitude
        # a * a + b * b exceeds 4 (i.e. absolute value greater than 2)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
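# Worked examples (hedged; consistent with the function above):
#   get_distance(0, 0, 50) -> 1.0  (the origin never diverges, so the loop
#                                   runs to step == max_step - 1)
#   get_distance(5, 0, 50) -> 0.0  (diverges on the very first step)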
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ):
    '''simple docstring'''
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
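    # Hedged extra example (not in the original script): the zoomed
    # black-and-white variant mentioned above at a small, fast size:
    #   small_bw = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                        figure_width=0.8, image_width=200, image_height=150,
    #                        use_distance_color_coding=False)
    #   small_bw.save("mandelbrot_zoom_bw.png")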
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
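# Hedged quick-start mirroring the integration tests above (downloads the
# converted PyTorch weights on first use):
#   model = FlaxRobertaPreLayerNormModel.from_pretrained(
#       "andreasmadsen/efficient_mlm_m0.40", from_pt=True
#   )
#   output = model(np.ones((1, 11), dtype="i4"))[0]  # shape (1, 11, hidden_size)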
| 21 | 1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    '''simple docstring'''

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    '''simple docstring'''

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """simple docstring"""

    def __new__(cls, name, bases, attrs):
        '''simple docstring'''
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    '''simple docstring'''
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
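# Hedged usage sketch (hypothetical demo class, not part of this module;
# assumes KEYMAP defines an "enter" code, as accelerate's key map does):
@register
class DemoMenu:
    @mark(KEYMAP["enter"])
    def select(cls):
        # handle_input() passes the class itself to the handler
        return "selected"
# A subsequent DemoMenu.handle_input() would dispatch a pressed Enter key here.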
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
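# Example invocation (hypothetical file name; "<dicoo>" assumes a
# textual-inversion checkpoint that defines that placeholder token):
#   python inference_bf16.py --dpm --steps 20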
| 21 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : str = nn.Linear(3 , 4 )
snake_case_ : Tuple = nn.BatchNormad(4 )
snake_case_ : str = nn.Linear(4 , 5 )
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
return output + 1
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Union[str, Any] = ModelHook()
add_hook_to_module(_lowercase , _lowercase )
self.assertEqual(test_model._hf_hook , _lowercase )
self.assertTrue(hasattr(_lowercase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowercase )
self.assertFalse(hasattr(_lowercase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowercase , """_old_forward""" ) )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : str = ModelForTest()
snake_case_ : List[Any] = ModelHook()
add_hook_to_module(_lowercase , _lowercase )
add_hook_to_module(_lowercase , _lowercase , append=_lowercase )
self.assertEqual(isinstance(test_model._hf_hook , _lowercase ) , _lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(_lowercase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowercase )
self.assertFalse(hasattr(_lowercase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowercase , """_old_forward""" ) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = ModelForTest()
snake_case_ : List[Any] = torch.randn(2 , 3 )
snake_case_ : List[Any] = test_model(x + 1 )
snake_case_ : Tuple = test_model(x + 2 )
snake_case_ : List[str] = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Dict = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
snake_case_ : List[Any] = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : List[Any] = test_model(_lowercase )
assert torch.allclose(_lowercase , _lowercase , atol=1E-5 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Union[str, Any] = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = test_model(_lowercase )
snake_case_ : int = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
snake_case_ : Dict = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Dict = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ : List[str] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : str = test_model(_lowercase )
assert torch.allclose(_lowercase , output + 2 , atol=1E-5 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Dict = torch.randn(2 , 3 )
snake_case_ : Optional[int] = test_model(_lowercase )
snake_case_ : List[str] = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = test_model(_lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
snake_case_ : Tuple = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = model(_lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase ) )
snake_case_ : List[Any] = torch.randn(2 , 3 ).to(0 )
snake_case_ : Tuple = model(_lowercase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ : Optional[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : Optional[int] = torch.randn(2 , 3 )
snake_case_ : Dict = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
snake_case_ : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : Dict = torch.randn(2 , 3 )
snake_case_ : Dict = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : List[Any] = torch.device(_lowercase )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : int = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : List[Any] = torch.randn(2 , 3 )
snake_case_ : int = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ : Dict = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : List[Any] = torch.device(_lowercase )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : Any = torch.randn(2 , 3 )
snake_case_ : int = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : Optional[int] = torch.randn(2 , 3 )
snake_case_ : str = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
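# Hedged summary of the pattern exercised above (wording mine, not from the
# source): AlignDevicesHook / attach_align_device_hook with offload=True move
# parameters to the meta device and stream the real weights in per forward
# pass, while remove_hook_from_module / remove_hook_from_submodules restore the
# original weights and forward methods.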
| 21 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        '''simple docstring'''
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        '''simple docstring'''
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        '''simple docstring'''
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        '''simple docstring'''
        pass
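# Hedged standalone usage sketch (network access and the rjieba package are
# assumed, matching the tests above):
#   tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("永和服装饰品有限公司")  # -> ["永和", "服装", "饰品", "有限公司"]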
| 21 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    '''simple docstring'''
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    '''simple docstring'''
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    '''simple docstring'''
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    '''simple docstring'''
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    '''simple docstring'''
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    '''simple docstring'''
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    '''simple docstring'''
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    '''simple docstring'''
    assert get_writer_batch_size(feature) == expected
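# These tests depend on pytest fixtures (parquet_path, shared_datadir and a
# `dataset` fixture) defined in the test suite's conftest. A hedged way to run
# just this module (path assumed) would be:
#   pytest tests/io/test_parquet.py -q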
| 21 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    '''simple docstring'''
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
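# Worked example (illustrative, not from the source): for n = 221,
# n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2. With base 2,
# pow(2, 55, 221) == 128 and pow(2, 110, 221) == 30; neither round yields
# m == 1 at r == 0 nor m == n - 1, so 221 (= 13 * 17) is reported composite.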
def test_miller_rabin() -> None:
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def get_deta_config(model_name):
    '''simple docstring'''
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
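# Illustrative behaviour (hedged): any model name containing "o365", e.g.
# "deta-swin-large-o365", selects the Object365 label set (366 classes);
# everything else falls back to the 91 COCO detection labels.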
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Optional[int] = dct.pop(__UpperCamelCase )
snake_case_ : Tuple = val
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case_ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case_ : Optional[int] = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
snake_case_ : Optional[int] = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Union[str, Any] = in_proj_weight[:dim, :]
snake_case_ : List[str] = in_proj_bias[: dim]
snake_case_ : str = in_proj_weight[
dim : dim * 2, :
]
snake_case_ : Any = in_proj_bias[
dim : dim * 2
]
snake_case_ : Any = in_proj_weight[
-dim :, :
]
snake_case_ : str = in_proj_bias[-dim :]
# fmt: on
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
snake_case_ : List[str] = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
snake_case_ : str = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Optional[Any] = in_proj_weight[:hidden_size, :]
snake_case_ : Union[str, Any] = in_proj_bias[:hidden_size]
snake_case_ : Tuple = in_proj_weight[
hidden_size : hidden_size * 2, :
]
snake_case_ : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2]
snake_case_ : Union[str, Any] = in_proj_weight[-hidden_size:, :]
snake_case_ : List[Any] = in_proj_bias[-hidden_size:]
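# Note on the two read_in_*_q_k_v helpers above: the original checkpoints store a
# fused in_proj matrix of shape (3 * d, d); rows [0:d] become the query projection,
# rows [d:2d] the key projection, and rows [2d:3d] the value projection.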
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : Union[str, Any] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = get_deta_config(__UpperCamelCase )
# load original state dict
if model_name == "deta-swin-large":
snake_case_ : Optional[Any] = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
snake_case_ : Any = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(F'Model name {model_name} not supported' )
snake_case_ : int = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(__UpperCamelCase , param.shape )
# rename keys
snake_case_ : Union[str, Any] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_swin_q_k_v(__UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCamelCase , __UpperCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
snake_case_ : Optional[int] = state_dict.pop(__UpperCamelCase )
snake_case_ : Tuple = val
if "input_proj" in key:
snake_case_ : Dict = state_dict.pop(__UpperCamelCase )
snake_case_ : List[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
snake_case_ : Optional[Any] = state_dict.pop(__UpperCamelCase )
snake_case_ : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
snake_case_ : List[str] = DetaForObjectDetection(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
snake_case_ : int = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(__UpperCamelCase )
# load image processor
snake_case_ : Any = DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
snake_case_ : Dict = prepare_img()
snake_case_ : str = processor(images=__UpperCamelCase , return_tensors="""pt""" )
snake_case_ : Optional[Any] = encoding["""pixel_values"""]
snake_case_ : int = model(pixel_values.to(__UpperCamelCase ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
snake_case_ : Any = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] )
snake_case_ : Optional[int] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] )
elif model_name == "deta-swin-large-o365":
snake_case_ : Dict = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] )
snake_case_ : Optional[Any] = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__UpperCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__UpperCamelCase ) , atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
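# Example invocation (illustrative script name and paths):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub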
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The value of pi from the math module is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
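    # y = sqrt(4 - x^2) on [0, 2] traces a quarter circle of radius 2, whose
    # area is (pi * 2**2) / 4 = pi, so the area estimate converges to pi.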
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
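# Sanity check (illustrative; the mangled name above computes a Gaussian pdf):
# at x = mu the density peaks at 1 / sqrt(2 * pi * sigma**2), which is about
# 0.3989 for the standard normal (mu=0, sigma=1).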
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
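# Shape contract (illustrative): for image_embeds of shape (batch, dim) and
# text_embeds of shape (num_concepts, dim), the result is a (batch, num_concepts)
# matrix of cosine similarities, since both inputs are L2-normalized first.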
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( __UpperCamelCase : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__lowerCAmelCase : Union[str, Any] = '''
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=_lowercase , required=_lowercase , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=_lowercase , required=_lowercase , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=_lowercase , required=_lowercase , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=_lowercase , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=_lowercase , default=_lowercase , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=_lowercase )
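    # Example CLI usage (illustrative paths):
    #   transformers-cli convert --model_type bert \
    #       --tf_checkpoint ./model.ckpt --config ./bert_config.json \
    #       --pytorch_dump_output ./pytorch_model.bin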
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase , ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f'Loading model {model_type}' )
snake_case_ : List[Any] = model_type
snake_case_ : Optional[Any] = tf_checkpoint
snake_case_ : int = pytorch_dump_output
snake_case_ : Dict = config
snake_case_ : int = finetuning_task_name
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
snake_case_ : Optional[int] = self._tf_checkpoint
snake_case_ : str = """"""
else:
snake_case_ : List[Any] = self._tf_checkpoint
snake_case_ : str = """"""
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowercase )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 21 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
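# Example invocation (illustrative script name and paths):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-w24 \
#       --image_size 384 --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24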
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
    help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : int=False ):
'''simple docstring'''
snake_case_ : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Tuple=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ : Any = """"""
else:
snake_case_ : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : int = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
snake_case_ : int = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : List[str] = in_proj_bias[: config.hidden_size]
snake_case_ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : int = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = dct.pop(__UpperCamelCase )
snake_case_ : List[Any] = val
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = ViTMSNConfig()
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Dict = """datasets/huggingface/label-files"""
snake_case_ : Optional[int] = """imagenet-1k-id2label.json"""
snake_case_ : int = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase ) , """r""" ) )
snake_case_ : Dict = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[Any] = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
snake_case_ : Dict = 3_8_4
snake_case_ : str = 1_5_3_6
snake_case_ : Tuple = 6
elif "l16" in checkpoint_url:
snake_case_ : int = 1_0_2_4
snake_case_ : Tuple = 4_0_9_6
snake_case_ : Union[str, Any] = 2_4
snake_case_ : Union[str, Any] = 1_6
snake_case_ : Optional[Any] = 0.1
elif "b4" in checkpoint_url:
snake_case_ : str = 4
elif "l7" in checkpoint_url:
snake_case_ : List[Any] = 7
snake_case_ : List[str] = 1_0_2_4
snake_case_ : List[str] = 4_0_9_6
snake_case_ : Optional[Any] = 2_4
snake_case_ : List[Any] = 1_6
snake_case_ : int = 0.1
snake_case_ : List[Any] = ViTMSNModel(__UpperCamelCase )
snake_case_ : str = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="""cpu""" )["""target_encoder"""]
snake_case_ : Optional[int] = ViTImageProcessor(size=config.image_size )
remove_projection_head(__UpperCamelCase )
snake_case_ : Optional[Any] = create_rename_keys(__UpperCamelCase , base_model=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , base_model=__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
snake_case_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
snake_case_ : Optional[Any] = ViTImageProcessor(
size=config.image_size , image_mean=__UpperCamelCase , image_std=__UpperCamelCase )
snake_case_ : List[Any] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
snake_case_ : Union[str, Any] = model(**__UpperCamelCase )
snake_case_ : int = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
snake_case_ : Any = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
snake_case_ : List[Any] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
snake_case_ : List[Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
snake_case_ : Tuple = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
snake_case_ : Union[str, Any] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCamelCase , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCamelCase )
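# Example invocation (illustrative output path; the checkpoint URL is the script default):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small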
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
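        # The file written above is a plain JSON token-to-id mapping, e.g.
        # {"[GO]": 0, "[s]": 1, "0": 2, ...}, which MgpstrTokenizer loads directly.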
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = str(id_ )
snake_case_ : Optional[int] = None
snake_case_ : Dict = None
snake_case_ : Tuple = []
snake_case_ : int = {} # {vertex:distance}
def __lt__( self , _lowercase ) -> str:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> int:
'''simple docstring'''
return self.id
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
self.neighbors.append(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : Dict = weight
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : Vertex ):
'''simple docstring'''
snake_case_ : Optional[int] = []
for u in graph:
snake_case_ : Optional[Any] = math.inf
snake_case_ : Optional[int] = None
snake_case_ : Tuple = 0
snake_case_ : Tuple = graph[:]
while q:
snake_case_ : Union[str, Any] = min(__UpperCamelCase )
q.remove(__UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
snake_case_ : Optional[int] = u
snake_case_ : int = u.edges[v.id]
for i in range(1 , len(__UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : Vertex ):
'''simple docstring'''
for u in graph:
snake_case_ : Optional[Any] = math.inf
snake_case_ : str = None
snake_case_ : Optional[int] = 0
snake_case_ : int = list(__UpperCamelCase )
hq.heapify(__UpperCamelCase )
while h:
snake_case_ : Optional[Any] = hq.heappop(__UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
snake_case_ : List[Any] = u
snake_case_ : Dict = u.edges[v.id]
hq.heapify(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
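# Minimal usage sketch (illustrative; `Vertex`, `connect`, and `prim` are
# hypothetical names for the mangled class and helpers above):
#   graph = [Vertex(n) for n in range(1, 6)]
#   connect(graph, 1, 2, 15)      # 1-based endpoints, edge weight 15
#   connect(graph, 1, 3, 12)
#   mst = prim(graph, graph[0])   # list of (child, parent) tree edges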
def __lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
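# Worked example: for node_count = 3, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5
# binary search trees, and 5 * 3! = 30 distinct binary trees.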
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
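# A row of the fixture above looks like (illustrative):
#   {"tokens": ["foo"] * 5, "labels": [1] * 5,
#    "answers": {"text": ["1976"], "answer_start": [97]}, "id": 0}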
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
    import bz2
    snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
        snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
        with lz4.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
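# Illustrative sketch (not one of the original fixtures): reading the table
# created above back out of SQLite. The table and column names follow the
# CREATE TABLE statement in the fixture; the helper name is hypothetical.
def _example_read_sqlite_dataset(path):
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        return cur.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()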
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
    import bz2
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
    snake_case_ : Any = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.int64(),
            """col_3""": pa.float64(),
        } )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
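# Illustrative sketch (not one of the original fixtures): the parquet file
# written above round-trips through pyarrow. The helper name is hypothetical.
def _example_read_parquet_dataset(path):
    table = pq.read_table(path)
    assert table.column_names == ["col_1", "col_2", "col_3"]
    return table.to_pydict()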
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
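# Illustrative sketch (not part of the original suite): how a test could
# consume one of the CSV fixtures above. Pass the path the fixture returns;
# the leading underscore keeps pytest from collecting this helper.
def _example_read_csv_rows(path):
    with open(path, newline="") as f:
        return list(csv.DictReader(f))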
| 21 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        - \'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg="binary"):
    """Accuracy plus F1, with the given sklearn averaging mode."""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
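# Illustrative sketch (not part of the original metric code): on three toy
# predictions, two of three match, and binary F1 over the positive class is
# also 2/3 (precision 0.5, recall 1.0).
def _example_acc_and_fa():
    import numpy as np  # local import; numpy is not otherwise needed here
    return acc_and_fa(np.array([0, 1, 1]), np.array([0, 1, 0]))  # both scores ~0.6667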
def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC's per-question macro-F1, answer-level F1 and exact match."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
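# Illustrative sketch (not part of the original module): MultiRC scoring
# groups answers by (paragraph, question). Here one question has both of its
# answers predicted correctly and the other has its single answer wrong, so
# exact_match comes out at 0.5.
def _example_evaluate_multirc():
    ids_preds = [
        {"idx": {"answer": 0, "paragraph": 0, "question": 0}, "prediction": 1},
        {"idx": {"answer": 1, "paragraph": 0, "question": 0}, "prediction": 0},
        {"idx": {"answer": 0, "paragraph": 0, "question": 1}, "prediction": 1},
    ]
    labels = [1, 0, 0]
    return evaluate_multirc(ids_preds, labels)  # {'exact_match': 0.5, ...}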
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        """Route to the scorer that matches the configured SuperGLUE subset."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 |
"""simple docstring"""
def binary_xor(a: int, b: int):
    """
    Return the XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
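# Sanity-check sketch (illustration only): up to leading zeros, the result
# agrees with Python's built-in integer XOR operator.
def _example_matches_builtin_xor(a: int = 25, b: int = 32) -> bool:
    ours = binary_xor(a, b)[2:].lstrip("0") or "0"
    return ours == bin(a ^ b)[2:]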
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Return the median of the merged, sorted contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
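# Worked illustration: with an even combined length the two middle values are
# averaged, e.g. [1, 2] and [3, 4] -> sorted [1, 2, 3, 4] -> (2 + 3) / 2.
def _example_even_length_median() -> float:
    return median_of_two_arrays([1, 2], [3, 4])  # 2.5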
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_2 = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_1, array_2)}''')
| 21 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverse the word order in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
"""simple docstring"""
    model_type = '''levit'''
    def __init__(self, image_size=2_2_4, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=1_6, hidden_sizes=[1_2_8, 2_5_6, 3_8_4], num_attention_heads=[4, 8, 1_2], depths=[4, 4, 4], key_dim=[1_6, 1_6, 1_6], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1E-4
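# Illustrative sketch (not part of the original module): the defaults above
# reproduce the LeViT-128S layout, and the ONNX config exposes the expected
# input axes for export.
def _example_levit_configs():
    config = LevitConfig()  # hidden_sizes defaults to [128, 256, 384]
    onnx_config = LevitOnnxConfig(config)
    return config.hidden_sizes, dict(onnx_config.inputs)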
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
"""simple docstring"""
    model_type = '''nat'''
    attribute_map = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(self, patch_size=4, num_channels=3, embed_dim=6_4, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 1_6], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1E-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
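# Illustrative sketch: hidden_size is derived from embed_dim and the number
# of stages; with the defaults above, 64 * 2 ** (4 - 1) == 512.
def _example_nat_hidden_size() -> int:
    return NatConfig().hidden_size  # 512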
| 21 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training."""
    set_seed(4_2)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=8_0)
    dataloader = DataLoader(dset, batch_size=1_6)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1E-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : int = get_training_setup(__UpperCamelCase )
# Use a single batch
snake_case_ , snake_case_ : List[Any] = next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ : List[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
snake_case_ : Union[str, Any] = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
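# Minimal sketch of the pattern exercised above (illustration only; assumes
# an already-initialized Accelerator, a prepared `ddp_model`, and an iterable
# of (input, target) batches): gradients accumulate locally inside `no_sync`
# and are only synchronized across processes on the other steps.
def _example_no_sync_pattern(accelerator, ddp_model, batches):
    for i, (x, y) in enumerate(batches):
        if i % 2 == 0:
            with accelerator.no_sync(ddp_model):  # skip gradient all-reduce
                step_model(ddp_model, x, y, accelerator)
        else:
            step_model(ddp_model, x, y, accelerator)  # gradients sync here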
def test_distributed_sync(accelerator):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : Optional[int] = get_training_setup(__UpperCamelCase )
# Use a single batch
snake_case_ , snake_case_ : str = next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
snake_case_ : Union[str, Any] = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
'''simple docstring'''
snake_case_ : List[Any] = Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ , snake_case_ , snake_case_ : List[str] = get_training_setup(__UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
snake_case_ , snake_case_ : int = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ : Any = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__UpperCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
snake_case_ : str = ddp_input[torch.randperm(len(__UpperCamelCase ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
'''simple docstring'''
snake_case_ : Dict = Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = get_training_setup(__UpperCamelCase , __UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
snake_case_ , snake_case_ : List[str] = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ : str = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__UpperCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
snake_case_ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__UpperCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def test_dataloader_break():
'''simple docstring'''
snake_case_ : int = Accelerator()
snake_case_ : Tuple = RegressionDataset(length=8_0 )
snake_case_ : Optional[int] = DataLoader(__UpperCamelCase , batch_size=1_6 )
snake_case_ : Any = RegressionDataset(length=9_6 )
snake_case_ : Union[str, Any] = DataLoader(__UpperCamelCase , batch_size=1_6 )
snake_case_ , snake_case_ : Optional[Any] = accelerator.prepare(__UpperCamelCase , __UpperCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if iteration < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if batch_num < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
'''simple docstring'''
snake_case_ : Any = Accelerator()
snake_case_ : Optional[int] = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(__UpperCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(__UpperCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(__UpperCamelCase , __UpperCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(__UpperCamelCase , __UpperCamelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
'''simple docstring'''
return 1_2
@property
    def num_embeds_ada_norm(self):
'''simple docstring'''
return 1_2
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 3_2
@property
    def dummy_vqvae(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
'''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(config )
@property
    def dummy_transformer(self):
'''simple docstring'''
torch.manual_seed(0 )
        height = 1_2
        width = 1_2
        model_kwargs = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        model = Transformer2DModel(**model_kwargs )
return model
    def test_vq_diffusion(self):
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self):
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
"""simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__(self, do_rescale=True, rescale_factor=1 / 2_5_5, do_pad=True, pad_size=8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="""symmetric""", data_format=data_format)
    def preprocess(self, images, do_rescale=None, rescale_factor=None, do_pad=None, pad_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
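# Worked illustration of the padding arithmetic in `pad` above: with
# pad_size=8, a 17 x 21 image is padded up to the next multiples of 8 (24 x 24).
def _example_pad_amounts(old_height: int = 17, old_width: int = 21, size: int = 8):
    pad_height = (old_height // size + 1) * size - old_height  # 24 - 17 == 7
    pad_width = (old_width // size + 1) * size - old_width  # 24 - 21 == 3
    return pad_height, pad_width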
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
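# Hedged sketch (illustrative only, not part of the conversion script): shows the
# version >= 2.0 reordering above on a toy tensor. All names and sizes here are
# hypothetical.
def _demo_qkv_reordering():
    import torch

    num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
    # Megatron >= 2.0 stores the fused QKV weight as [num_heads * num_splits * hidden_size, :] ...
    fused = torch.arange(num_heads * num_splits * hidden_size * cols, dtype=torch.float32)
    fused = fused.view(num_heads * num_splits * hidden_size, cols)
    # ... which gets regrouped so the Q/K/V split becomes the leading axis.
    out = fused.view(num_heads, num_splits, hidden_size, cols).transpose(0, 1).contiguous()
    return out.view(fused.size())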
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
    # the .zip extension is optional; keep handling it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : int = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
__lowerCAmelCase : int = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split()
)
__lowerCAmelCase : Tuple = '''|'''.join(sys.argv[1:])
__lowerCAmelCase : List[str] = re.compile(RF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : Union[str, Any] = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
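    # 0.7978845608 ~= sqrt(2 / pi); together with 0.044715 this is the tanh approximation of GELU.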
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
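# Hedged usage sketch (illustrative only): resolve an activation through the
# mapping above and apply it to a toy tensor.
#   act = ACTaFN["gelu_new"]
#   y = act(tf.constant([-1.0, 0.0, 1.0]))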
| 21 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = feature_size
snake_case_ : Optional[int] = sampling_rate
snake_case_ : Dict = padding_value
snake_case_ : Any = kwargs.pop("""padding_side""" , """right""" )
snake_case_ : Dict = kwargs.pop("""return_attention_mask""" , _lowercase )
super().__init__(**_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = True , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(_lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
snake_case_ : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
snake_case_ : Union[str, Any] = processed_features[self.model_input_names[0]]
snake_case_ : Tuple = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_lowercase ) == 0:
if return_attention_mask:
snake_case_ : Dict = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
snake_case_ : Union[str, Any] = required_input[0]
if isinstance(_lowercase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
snake_case_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_lowercase ):
snake_case_ : Any = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_lowercase ):
snake_case_ : Optional[Any] = """tf"""
elif is_torch_tensor(_lowercase ):
snake_case_ : Union[str, Any] = """pt"""
elif isinstance(_lowercase , (int, float, list, tuple, np.ndarray) ):
snake_case_ : int = """np"""
else:
raise ValueError(
f'type of {first_element} unknown: {type(_lowercase )}. '
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
snake_case_ : Dict = to_numpy(_lowercase )
else:
snake_case_ : Optional[int] = [to_numpy(_lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
snake_case_ : List[str] = self._get_padding_strategies(padding=_lowercase , max_length=_lowercase )
snake_case_ : List[Any] = processed_features[self.model_input_names[0]]
snake_case_ : str = len(_lowercase )
if not all(len(_lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
snake_case_ : Any = []
for i in range(_lowercase ):
snake_case_ : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
snake_case_ : Any = self._truncate(
_lowercase , max_length=_lowercase , pad_to_multiple_of=_lowercase , truncation=_lowercase , )
truncated_inputs.append(_lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
snake_case_ : Optional[int] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
snake_case_ : List[str] = PaddingStrategy.MAX_LENGTH
snake_case_ : List[Any] = {}
for i in range(_lowercase ):
# padding
snake_case_ : str = self._pad(
truncated_inputs[i] , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
snake_case_ : Optional[Any] = []
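                # The original utility downcasts float64 (numpy's default) to float32 here before batching.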
if value.dtype is np.dtype(np.floataa ):
snake_case_ : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(_lowercase )
return BatchFeature(_lowercase , tensor_type=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = PaddingStrategy.DO_NOT_PAD , _lowercase = None , _lowercase = None , ) -> dict:
'''simple docstring'''
snake_case_ : List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
snake_case_ : Dict = len(_lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
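            # Round max_length up to the next multiple of pad_to_multiple_of
            # (e.g. max_length=100, pad_to_multiple_of=8 -> 104); exact multiples skip this branch.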
snake_case_ : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case_ : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
snake_case_ : Any = np.ones(len(_lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
snake_case_ : Any = max_length - len(_lowercase )
if self.padding_side == "right":
if return_attention_mask:
snake_case_ : Any = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
snake_case_ : List[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
snake_case_ : Dict = np.pad(
_lowercase , _lowercase , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
snake_case_ : Optional[Any] = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
snake_case_ : Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
snake_case_ : Optional[Any] = np.pad(
_lowercase , _lowercase , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , ) -> str:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
snake_case_ : Union[str, Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case_ : List[str] = len(_lowercase ) > max_length
if needs_to_be_truncated:
snake_case_ : Tuple = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
snake_case_ : Any = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self , _lowercase=False , _lowercase=None ) -> Tuple:
'''simple docstring'''
if padding is not False:
if padding is True:
snake_case_ : Any = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_lowercase , _lowercase ):
snake_case_ : Union[str, Any] = PaddingStrategy(_lowercase )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : Dict = padding
else:
snake_case_ : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
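# For this DAG the longest path visits 5 vertices (e.g. 0 -> 2 -> 5 -> 6 -> 7),
# so the expected output is 5.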
longest_distance(graph)
| 21 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__lowerCAmelCase : List[Any] = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase : Dict = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = list(s_dict.keys() )
for key in keys:
snake_case_ : Optional[int] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
snake_case_ : Optional[Any] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
print(F'{key} -> {new_key}' )
snake_case_ : Tuple = s_dict.pop(__UpperCamelCase )
return s_dict
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ , snake_case_ : str = emb.weight.shape
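    # Build a bias-free Linear whose weight is shared with the embedding matrix (weight tying for the LM head).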
snake_case_ : List[str] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case_ : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str ):
'''simple docstring'''
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : Optional[int] = os.path.basename(__UpperCamelCase )
snake_case_ : List[str] = url.split("""/""" )[-2]
snake_case_ : Tuple = os.path.join(__UpperCamelCase , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not os.path.isfile(__UpperCamelCase ):
raise RuntimeError(F'{download_target} exists and is not a regular file' )
if os.path.isfile(__UpperCamelCase ):
snake_case_ : Optional[Any] = open(__UpperCamelCase , """rb""" ).read()
if hashlib.shaaaa(__UpperCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(__UpperCamelCase ) as source, open(__UpperCamelCase , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=8_0 , unit="""iB""" , unit_scale=__UpperCamelCase , unit_divisor=1_0_2_4 ) as loop:
while True:
snake_case_ : Optional[int] = source.read(8_1_9_2 )
if not buffer:
break
output.write(__UpperCamelCase )
loop.update(len(__UpperCamelCase ) )
snake_case_ : List[Any] = open(__UpperCamelCase , """rb""" ).read()
if hashlib.shaaaa(__UpperCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : str ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
snake_case_ : int = _download(_MODELS[checkpoint_path] )
else:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Optional[int] = original_checkpoint["""dims"""]
snake_case_ : int = original_checkpoint["""model_state_dict"""]
snake_case_ : List[str] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(__UpperCamelCase )
rename_keys(__UpperCamelCase )
snake_case_ : Union[str, Any] = True
snake_case_ : int = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
snake_case_ : Dict = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__UpperCamelCase , decoder_ffn_dim=__UpperCamelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
snake_case_ : Optional[Any] = WhisperForConditionalGeneration(__UpperCamelCase )
snake_case_ , snake_case_ : Dict = model.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
if len(__UpperCamelCase ) > 0 and not set(__UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F' but all the following weights are missing {missing}' )
if tie_embeds:
snake_case_ : Optional[int] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
snake_case_ : Any = proj_out_weights
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowerCAmelCase : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
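# Hedged usage sketch (script name and paths are hypothetical):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny-en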
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1  # keeps the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
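# Worked example: for node_count = 5, catalan_number(5) = 42 (distinct binary search trees)
# and binary_tree_count(5) = 42 * 5! = 5040 (labeled binary trees).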
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |