from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by shifting every pixel by `level`."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
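# Illustrative values derived from the formula above, for level = 100:
# brightness(0) == 100 and brightness(128) == 228; out-of-range results are
# left to Pillow's point() handling.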
# ---
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks board[row][column]."""
    # check this row and this column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check the upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # check the upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, printing and recording every complete solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for a queen and . for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
# ---
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a single flax key/tensor pair so it matches the PyTorch naming."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into the real layer name, the remaining
    key tuple, and the content to store for it."""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
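# Example invocation of the conversion script above (the file name and paths
# are illustrative, not taken from the original):
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#       --pytorch_dump_folder_path /path/to/converted \
#       --max_shard_size 10GB \
#       --dtype bfloat16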
# ---
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
# ---
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# ---
def all_unique_chars(input_str: str) -> bool:
    """Return True if every character in input_str occurs only once, tracked
    with a bitmap keyed on each character's Unicode code point."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
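# Illustrative behaviour of the bitmap check above:
#   all_unique_chars("abcdef") -> True
#   all_unique_chars("abccba") -> False  (the second "c" hits an already-set bit)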
# ---
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer: each character of the input is one token."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
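# Illustrative behaviour of the character tokenizer above: _tokenize splits a
# string into single characters, e.g. "abc" -> ["a", "b", "c"], and each
# character is then looked up in the vocab (falling back to the unk token).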
# ---
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
# ---
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
# ---
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


# The original class name was lost in extraction; DeiTImageProcessor is a best
# guess from the defaults (256 resize, 224 center-crop, bicubic, ImageNet
# standard normalization).
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# ---
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
# ---
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return sensible lower/upper plot bounds for the FFT magnitudes in dB."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot a filter's frequency response by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot a filter's phase response by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_angle = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_angle, -2 * pi))
    plt.show()
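# A minimal sketch of a class satisfying the FilterType protocol above (the
# name IdentityFilter and the 48000 Hz samplerate are illustrative, not part
# of the original module). An identity filter passes every sample through
# unchanged, so its magnitude response is a flat 0 dB line.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(IdentityFilter(), 48000)
# show_phase_response(IdentityFilter(), 48000)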
# ---
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL

is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
# ---
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
# ---
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction at temperature T."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
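# Illustrative call with typical silicon values at T = 300 K (concentrations
# in cm^-3; the numbers below are textbook approximations, not from the
# original module):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# returns roughly 0.81 V, i.e. (kT/q) * ln(Nd * Na / ni**2).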
# ---
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 15 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCAmelCase : List[Any] ="hf-internal-testing/tiny-random-bert"
lowerCAmelCase : List[Any] =os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCAmelCase : List[str] ="9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = cached_file(_UpperCamelCase , _UpperCamelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCamelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase)))
with open(os.path.join(_UpperCamelCase , """refs""" , """main""")) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , """snapshots""" , _UpperCamelCase , _UpperCamelCase))
self.assertTrue(os.path.isfile(_UpperCamelCase))
# File is cached at the same place the second time.
_lowerCamelCase : Optional[int] = cached_file(_UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , _UpperCamelCase)
# Using a specific revision to test the full commit hash.
_lowerCamelCase : str = cached_file(_UpperCamelCase , _UpperCamelCase , revision="""9b8c223""")
self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , """snapshots""" , _UpperCamelCase , _UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(_UpperCamelCase , """is not a valid model identifier"""):
_lowerCamelCase : List[str] = cached_file("""tiny-random-bert""" , _UpperCamelCase)
with self.assertRaisesRegex(_UpperCamelCase , """is not a valid git identifier"""):
_lowerCamelCase : List[str] = cached_file(_UpperCamelCase , _UpperCamelCase , revision="""aaaa""")
with self.assertRaisesRegex(_UpperCamelCase , """does not appear to have a file named"""):
_lowerCamelCase : int = cached_file(_UpperCamelCase , """conf""")
def _SCREAMING_SNAKE_CASE ( self : int) ->str:
"""simple docstring"""
with self.assertRaisesRegex(_UpperCamelCase , """does not appear to have a file named"""):
_lowerCamelCase : List[str] = cached_file(_UpperCamelCase , """conf""")
with open(os.path.join(_UpperCamelCase , """refs""" , """main""")) as f:
_lowerCamelCase : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , """.no_exist""" , _UpperCamelCase , """conf""")))
_lowerCamelCase : List[str] = cached_file(_UpperCamelCase , """conf""" , _raise_exceptions_for_missing_entries=_UpperCamelCase)
self.assertIsNone(_UpperCamelCase)
_lowerCamelCase : str = cached_file(_UpperCamelCase , """conf""" , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase)
self.assertIsNone(_UpperCamelCase)
_lowerCamelCase : List[Any] = mock.Mock()
_lowerCamelCase : List[Any] = 500
_lowerCamelCase : Any = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : Any = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=_UpperCamelCase) as mock_head:
_lowerCamelCase : Optional[int] = cached_file(_UpperCamelCase , """conf""" , _raise_exceptions_for_connection_errors=_UpperCamelCase)
self.assertIsNone(_UpperCamelCase)
# This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
"""simple docstring"""
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase))
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase))
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
"""simple docstring"""
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt"""))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCamelCase , """is not a valid model identifier"""):
get_file_from_repo("""bert-base-case""" , _UpperCamelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCamelCase , """is not a valid git identifier"""):
get_file_from_repo("""bert-base-cased""" , _UpperCamelCase , revision="""ahaha""")
_lowerCamelCase : Union[str, Any] = get_file_from_repo("""bert-base-cased""" , _UpperCamelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase : Tuple = json.loads(open(_UpperCamelCase , """r""").read())
self.assertEqual(config["""hidden_size"""] , 768)
def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : List[str] = Path(_UpperCamelCase) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCamelCase , """a.txt""") , str(_UpperCamelCase))
self.assertIsNone(get_file_from_repo(_UpperCamelCase , """b.txt"""))
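# Hedged usage sketch (added; not part of the test classes above). Assumes
# network access to the public "bert-base-cased" checkpoint.
if __name__ == "__main__":
    resolved = cached_file("bert-base-cased", CONFIG_NAME)
    print(resolved)  # path inside the local HF cache
    # get_file_from_repo returns None for missing files instead of raising.
    print(get_file_from_repo("bert-base-cased", "does-not-exist.txt"))  # None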
| 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# Pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _start_torch_memory_measurement():
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
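# Hedged helper sketch (added; not in the original tests): wrap a workload with
# the reset pattern above and report its peak CUDA allocation in bytes.
def _peak_cuda_memory_bytes(fn):
    _start_torch_memory_measurement()
    fn()
    return torch.cuda.max_memory_allocated()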
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
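# Illustrative sketch (added): a toy version of the lazy-module pattern above.
# Assumption: the real _LazyModule also handles __dir__, __all__ and caching;
# this minimal version only resolves attributes on first access.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._toy_import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, names in self._toy_import_structure.items():
            if attr in names:
                submodule = importlib.import_module("." + module_name, self.__name__)
                return getattr(submodule, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")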
| 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
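# Hedged usage sketch (added): the first class above appears to be an obfuscated
# copy of transformers' SwinConfig; the upstream name is used under that assumption.
if __name__ == "__main__":
    from transformers import SwinConfig

    config = SwinConfig()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
    print(config.hidden_size)  # 96 * 2 ** 3 == 768, as computed above
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']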
| 15 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase : Any ="https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase : Union[str, Any] =BASE_URL + "/user"
# https://github.com/settings/tokens
lowerCAmelCase : Optional[int] =os.environ.get("USER_TOKEN", "")
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Dict = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__A , headers=__A ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
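# Hedged sketch (added): the denoising loop pattern the tests above exercise,
# written out once. Assumption: `model` is any callable mapping
# (scaled_sample, timestep) to a noise prediction of the same shape.
def _euler_sampling_loop(model, sample, num_inference_steps=10):
    scheduler = EulerDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(num_inference_steps)
    generator = torch.manual_seed(0)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample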
| 15 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] =get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = PegasusTokenizer
_snake_case = PegasusTokenizerFast
_snake_case = True
_snake_case = True
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : List[Any] = PegasusTokenizer(_UpperCamelCase)
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""")
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Tuple) ->PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : List[Any]) ->Dict:
"""simple docstring"""
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """</s>"""
_lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase) , _UpperCamelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase) , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<pad>""")
self.assertEqual(vocab_keys[1] , """</s>""")
self.assertEqual(vocab_keys[-1] , """v""")
self.assertEqual(len(_UpperCamelCase) , 1103)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
_lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[int] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_lowerCamelCase : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0]
_lowerCamelCase : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowerCamelCase : str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_lowerCamelCase : Optional[int] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_lowerCamelCase : List[Any] = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_lowerCamelCase : Any = """To ensure a smooth flow of bank resolutions."""
_lowerCamelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_lowerCamelCase : Any = tokenizer([raw_input_str] , return_tensors=_UpperCamelCase).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["""This is going to be way too long.""" * 150, """short example"""]
_lowerCamelCase : Dict = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCamelCase : List[Any] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""")
_lowerCamelCase : int = self._large_tokenizer(
text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""")
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCamelCase) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = PegasusTokenizer
_snake_case = PegasusTokenizerFast
_snake_case = True
_snake_case = True
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : Union[str, Any] = PegasusTokenizer(_UpperCamelCase , offset=0 , mask_token_sent=_UpperCamelCase , mask_token="""[MASK]""")
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""")
def _SCREAMING_SNAKE_CASE ( self : List[str] , **_UpperCamelCase : List[Any]) ->PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Dict) ->List[Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
_lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname)
_lowerCamelCase : int = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_lowerCamelCase : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0]
_lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase).input_ids[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
"""simple docstring"""
_lowerCamelCase : List[Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
_lowerCamelCase : Tuple = ["""not super long but more than 5 tokens""", """tiny"""]
_lowerCamelCase : Optional[int] = self._large_tokenizer(_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""")
_lowerCamelCase : List[str] = self._large_tokenizer(
text_target=_UpperCamelCase , max_length=5 , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="""pt""")
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCamelCase) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_lowerCamelCase : int = self._large_tokenizer(_UpperCamelCase).input_ids
self.assertListEqual(
_UpperCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
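# Hedged usage sketch (added; needs network access to the public checkpoint):
if __name__ == "__main__":
    tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
    ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
    print(ids)  # ends with the </s> id (1), matching the test above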
| 717 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
return len(self.vocab)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(_UpperCamelCase)
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
"""simple docstring"""
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
return (vocab_file,)
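# Hedged usage note (added): the tokenizer above is character-level, so every
# input string is split into single characters; e.g., assuming a vocab.json
# covering lowercase alphanumerics, "abc1" would tokenize to ["a", "b", "c", "1"].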
| 15 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: maximum value achievable with the items from
    ``index`` onwards without exceeding ``max_weight``."""
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
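if __name__ == "__main__":
    # Hedged usage example (added): best value is 13 (take the items with
    # weights 1 and 4, worth 5 and 8).
    print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # 13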
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
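# Hedged usage sketch (added) for the helper exercised above:
if __name__ == "__main__":
    features, indices = get_aligned_output_features_output_indices(None, None, ["a", "b", "c"])
    print(features, indices)  # ['c'] [2] — defaults to the last stage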
| 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : int ={
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'roc_bert'
def __init__( self : Optional[int] , _UpperCamelCase : Union[str, Any]=3_0522 , _UpperCamelCase : int=768 , _UpperCamelCase : Any=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Tuple=3072 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[Any]=1E-1_2 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : int="absolute" , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Tuple=768 , _UpperCamelCase : int=910 , _UpperCamelCase : Optional[Any]=512 , _UpperCamelCase : List[str]=2_4858 , _UpperCamelCase : Tuple=True , **_UpperCamelCase : Optional[Any] , ) ->List[str]:
"""simple docstring"""
_lowerCamelCase : int = vocab_size
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : Optional[Any] = layer_norm_eps
_lowerCamelCase : List[str] = use_cache
_lowerCamelCase : List[Any] = enable_pronunciation
_lowerCamelCase : List[str] = enable_shape
_lowerCamelCase : Dict = pronunciation_embed_dim
_lowerCamelCase : Optional[Any] = pronunciation_vocab_size
_lowerCamelCase : str = shape_embed_dim
_lowerCamelCase : List[Any] = shape_vocab_size
_lowerCamelCase : int = concat_input
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase)
| 719 | import math
def is_prime(number: int) -> bool:
    """Deterministic trial-division primality test for non-negative ints."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Find the next prime starting from ``value * factor``, skipping the
    starting point if it is itself prime; pass ``desc=True`` to search
    downwards instead."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
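if __name__ == "__main__":
    # Hedged usage examples (added) for the helpers above.
    print(is_prime(13))  # True
    print(next_prime(13))  # 17: 13 is prime itself, so the search moves past it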
| 15 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'decision_transformer'
_snake_case = ['past_key_values']
_snake_case = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict , _UpperCamelCase : int=17 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : Optional[Any]=128 , _UpperCamelCase : Tuple=4096 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[str]=1 , _UpperCamelCase : str=1024 , _UpperCamelCase : str=3 , _UpperCamelCase : str=1 , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Dict="relu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Union[str, Any]=1E-5 , _UpperCamelCase : Union[str, Any]=0.0_2 , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Tuple=5_0256 , _UpperCamelCase : Optional[Any]=5_0256 , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Union[str, Any]=False , **_UpperCamelCase : Union[str, Any] , ) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = state_dim
_lowerCamelCase : str = act_dim
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Any = max_ep_len
_lowerCamelCase : Union[str, Any] = action_tanh
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Dict = n_positions
_lowerCamelCase : Dict = n_layer
_lowerCamelCase : str = n_head
_lowerCamelCase : Union[str, Any] = n_inner
_lowerCamelCase : Any = activation_function
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : int = embd_pdrop
_lowerCamelCase : Dict = attn_pdrop
_lowerCamelCase : List[str] = layer_norm_epsilon
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = scale_attn_weights
_lowerCamelCase : str = use_cache
_lowerCamelCase : str = scale_attn_by_inverse_layer_idx
_lowerCamelCase : int = reorder_and_upcast_attn
_lowerCamelCase : Optional[int] = bos_token_id
_lowerCamelCase : Dict = eos_token_id
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase)
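# Hedged usage sketch (added): the class above appears to be an obfuscated copy
# of transformers' DecisionTransformerConfig; the upstream name is used here.
if __name__ == "__main__":
    from transformers import DecisionTransformerConfig

    config = DecisionTransformerConfig(state_dim=17, act_dim=4)
    print(config.hidden_size, config.n_layer)  # 128 3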
| 720 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = load_image(_UpperCamelCase)
_lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Any = self.model(**_UpperCamelCase)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
_lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
_lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
_lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
| 15 | 0 |
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, using
    Kahn's topological ordering to relax distances."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            # Relax: a longer path to x has been found through vertex.
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
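# Note (added): list.pop(0) is O(V) per dequeue; swapping in
# collections.deque and popleft() would make the whole pass O(V + E).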
| 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
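        # MGP-STR decodes with three parallel heads: character (38 classes), BPE (GPT-2, 50257) and WordPiece (BERT, 30522) logits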
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
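    # num_inference_steps used by the tests below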
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
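        # full denoising loop; the summary statistics asserted below pin down the scheduler's expected numerics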
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 10.0807) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 10.0807) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 124.52_2994_9951_1719) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 700 | import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = parse_args()
# Import training_script as a module.
_lowerCamelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Optional[Any] = script_fpath.stem
_lowerCamelCase : Dict = importlib.import_module(__A )
# Patch sys.argv
_lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 15 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[Any] = MgpstrTokenizer
_snake_case : Optional[int] = False
_snake_case : List[Any] = {}
_snake_case : Dict = False
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
"""simple docstring"""
super().setUp()
# fmt: off
_lowerCamelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : Optional[int] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
def _SCREAMING_SNAKE_CASE ( self : Any , **_UpperCamelCase : List[Any]) ->int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[str]) ->str:
"""simple docstring"""
_lowerCamelCase : str = """tester"""
_lowerCamelCase : int = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""")
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Any = self.get_tokenizers(do_lower_case=_UpperCamelCase)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
_lowerCamelCase : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token})
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=_UpperCamelCase)
self.assertEqual(len(_UpperCamelCase) , 1)
_lowerCamelCase : Any = tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
self.assertTrue(special_token not in decoded)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
_lowerCamelCase : List[Any] = self.get_input_output_texts(_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer.tokenize(_UpperCamelCase)
_lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(_UpperCamelCase)
_lowerCamelCase : Any = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase)
self.assertNotEqual(len(_UpperCamelCase) , 0)
_lowerCamelCase : str = tokenizer.decode(_UpperCamelCase)
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase)
self.assertEqual(text_a.replace(""" """ , """""") , _UpperCamelCase)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""")
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""")
def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
"""simple docstring"""
pass
| 701 | def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
if n == 0:
return 0
_lowerCamelCase : Tuple = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max(
__A , prices[i - 1] + naive_cut_rod_recursive(n - i , __A ) )
return max_revue
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
_lowerCamelCase : Optional[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__A , __A , __A )
def A__ ( __A , __A , __A ):
'''simple docstring'''
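    # memoized recursion: max_rev caches the best revenue for each rod length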
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : int = float("""-inf""" )
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = max(
__A , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __A , __A ) , )
_lowerCamelCase : Optional[Any] = max_revenue
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
_enforce_args(__A , __A )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_lowerCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_lowerCamelCase : Any = 0
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = max_rev[i]
for j in range(1 , i + 1 ):
_lowerCamelCase : List[Any] = max(__A , prices[j - 1] + max_rev[i - j] )
_lowerCamelCase : int = max_revenue_i
return max_rev[n]
def A__ ( __A , __A ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : Any = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__A )
if n > len(__A ):
_lowerCamelCase : List[Any] = (
"""Each integral piece of rod must have a corresponding price. """
F"""Got n = {n} but length of prices = {len(__A )}"""
)
raise ValueError(__A )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : str = [6, 10, 12, 15, 20, 23]
_lowerCamelCase : List[str] = len(__A )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowerCamelCase : Tuple = 36
_lowerCamelCase : Any = top_down_cut_rod(__A , __A )
_lowerCamelCase : Dict = bottom_up_cut_rod(__A , __A )
_lowerCamelCase : List[str] = naive_cut_rod_recursive(__A , __A )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 15 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
_lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : str = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
| 702 | from __future__ import annotations
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_UpperCamelCase) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Any = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(_UpperCamelCase) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
try:
with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : str = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def A__ ( __A ):
'''simple docstring'''
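    # tie a bias-free linear output projection to the embedding weights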
_lowerCamelCase : Dict = emb.weight.shape
_lowerCamelCase : List[str] = nn.Linear(__A , __A , bias=__A )
_lowerCamelCase : Optional[Any] = emb.weight.data
return lin_layer
def A__ ( __A , __A="facebook/mbart-large-en-ro" , __A=False , __A=False ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = torch.load(__A , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__A )
_lowerCamelCase : Optional[Any] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_lowerCamelCase : str = MBartConfig.from_pretrained(__A , vocab_size=__A )
if mbart_aa and finetuned:
_lowerCamelCase : Union[str, Any] = """relu"""
_lowerCamelCase : List[Any] = state_dict["""decoder.embed_tokens.weight"""]
_lowerCamelCase : Any = MBartForConditionalGeneration(__A )
model.model.load_state_dict(__A )
if finetuned:
_lowerCamelCase : Dict = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
lowerCAmelCase : Dict =parser.parse_args()
lowerCAmelCase : Any =convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 703 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
| 15 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase : str = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
lowerCAmelCase : Union[str, Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def A__ ( ):
'''simple docstring'''
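    # map each byte to a printable unicode character so BPE never has to handle whitespace/control bytes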
_lowerCamelCase : Any = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_lowerCamelCase : int = bs[:]
_lowerCamelCase : List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
_lowerCamelCase : Any = [chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def A__ ( __A ):
'''simple docstring'''
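    # collect the set of adjacent symbol pairs in a word; BPE picks its next merge from these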
_lowerCamelCase : Any = set()
_lowerCamelCase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase : str = char
return pairs
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['input_ids', 'attention_mask']
def __init__( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any]="replace" , _UpperCamelCase : Tuple="<s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Any="</s>" , _UpperCamelCase : Dict="<s>" , _UpperCamelCase : str="<unk>" , _UpperCamelCase : str="<pad>" , _UpperCamelCase : List[str]="<mask>" , _UpperCamelCase : Tuple=False , **_UpperCamelCase : int , ) ->Dict:
"""simple docstring"""
_lowerCamelCase : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else bos_token
_lowerCamelCase : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else eos_token
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else sep_token
_lowerCamelCase : Any = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else cls_token
_lowerCamelCase : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else unk_token
_lowerCamelCase : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
_lowerCamelCase : int = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token
super().__init__(
errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Any = json.load(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : int = errors # how to handle errors in decoding
_lowerCamelCase : Any = bytes_to_unicode()
_lowerCamelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase , encoding="""utf-8""") as merges_handle:
_lowerCamelCase : Optional[int] = merges_handle.read().split("""\n""")[1:-1]
_lowerCamelCase : Optional[int] = [tuple(merge.split()) for merge in bpe_merges]
_lowerCamelCase : int = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase : Optional[Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
"""simple docstring"""
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCamelCase : List[str] = tuple(_UpperCamelCase)
_lowerCamelCase : str = get_pairs(_UpperCamelCase)
if not pairs:
return token
while True:
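            # greedily apply the lowest-ranked known merge until none remains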
_lowerCamelCase : int = min(_UpperCamelCase , key=lambda _UpperCamelCase: self.bpe_ranks.get(_UpperCamelCase , float("""inf""")))
if bigram not in self.bpe_ranks:
break
_lowerCamelCase : Optional[Any] = bigram
_lowerCamelCase : int = []
_lowerCamelCase : Any = 0
while i < len(_UpperCamelCase):
try:
_lowerCamelCase : Union[str, Any] = word.index(_UpperCamelCase , _UpperCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCamelCase : Optional[int] = j
if word[i] == first and i < len(_UpperCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCamelCase : Optional[Any] = tuple(_UpperCamelCase)
_lowerCamelCase : List[Any] = new_word
if len(_UpperCamelCase) == 1:
break
else:
_lowerCamelCase : Dict = get_pairs(_UpperCamelCase)
_lowerCamelCase : List[Any] = """ """.join(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Any) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : int = []
for token in re.findall(self.pat , _UpperCamelCase):
_lowerCamelCase : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase).split(""" """))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Union[str, Any]) ->List[Any]:
"""simple docstring"""
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : List[Any]) ->List[Any]:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = """""".join(_UpperCamelCase)
_lowerCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
_lowerCamelCase : Dict = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
_lowerCamelCase : Any = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
_lowerCamelCase : Union[str, Any] = 0
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""")
_lowerCamelCase : Dict = token_index
writer.write(""" """.join(_UpperCamelCase) + """\n""")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
_lowerCamelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase)) + [1]
return [1] + ([0] * len(_UpperCamelCase)) + [1, 1] + ([0] * len(_UpperCamelCase)) + [1]
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowerCamelCase : Dict = [self.sep_token_id]
_lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=False , **_UpperCamelCase : str) ->int:
"""simple docstring"""
_lowerCamelCase : str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase) > 0 and not text[0].isspace()):
_lowerCamelCase : Dict = """ """ + text
return (text, kwargs)
| 704 | lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
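    # invariant: sequence[:low] holds colors[0], sequence[low:mid] holds colors[1], sequence[high+1:] holds colors[2]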
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Dict ={
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'cvt'
def __init__( self : Union[str, Any] , _UpperCamelCase : Any=3 , _UpperCamelCase : List[str]=[7, 3, 3] , _UpperCamelCase : Dict=[4, 2, 2] , _UpperCamelCase : List[Any]=[2, 1, 1] , _UpperCamelCase : Union[str, Any]=[64, 192, 384] , _UpperCamelCase : Optional[int]=[1, 3, 6] , _UpperCamelCase : Dict=[1, 2, 10] , _UpperCamelCase : List[Any]=[4.0, 4.0, 4.0] , _UpperCamelCase : str=[0.0, 0.0, 0.0] , _UpperCamelCase : Any=[0.0, 0.0, 0.0] , _UpperCamelCase : List[str]=[0.0, 0.0, 0.1] , _UpperCamelCase : int=[True, True, True] , _UpperCamelCase : Tuple=[False, False, True] , _UpperCamelCase : str=["dw_bn", "dw_bn", "dw_bn"] , _UpperCamelCase : List[Any]=[3, 3, 3] , _UpperCamelCase : Tuple=[1, 1, 1] , _UpperCamelCase : List[str]=[2, 2, 2] , _UpperCamelCase : List[str]=[1, 1, 1] , _UpperCamelCase : int=[1, 1, 1] , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : int=1E-1_2 , **_UpperCamelCase : List[str] , ) ->List[Any]:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : int = num_channels
_lowerCamelCase : Tuple = patch_sizes
_lowerCamelCase : List[Any] = patch_stride
_lowerCamelCase : List[Any] = patch_padding
_lowerCamelCase : int = embed_dim
_lowerCamelCase : Dict = num_heads
_lowerCamelCase : Tuple = depth
_lowerCamelCase : Any = mlp_ratio
_lowerCamelCase : Tuple = attention_drop_rate
_lowerCamelCase : Dict = drop_rate
_lowerCamelCase : Any = drop_path_rate
_lowerCamelCase : Dict = qkv_bias
_lowerCamelCase : int = cls_token
_lowerCamelCase : int = qkv_projection_method
_lowerCamelCase : List[Any] = kernel_qkv
_lowerCamelCase : List[str] = padding_kv
_lowerCamelCase : Dict = stride_kv
_lowerCamelCase : Any = padding_q
_lowerCamelCase : int = stride_q
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
| 705 | from __future__ import annotations
lowerCAmelCase : int =[]
def A__ ( __A , __A , __A ):
'''simple docstring'''
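    # reject the square if another queen shares its row, column, or either upper diagonal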
for i in range(len(__A ) ):
if board[row][i] == 1:
return False
for i in range(len(__A ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( __A , __A ):
'''simple docstring'''
if row >= len(__A ):
solution.append(__A )
printboard(__A )
print()
return True
for i in range(len(__A ) ):
if is_safe(__A , __A , __A ):
_lowerCamelCase : int = 1
solve(__A , row + 1 )
_lowerCamelCase : List[str] = 0
return False
def A__ ( __A ):
'''simple docstring'''
for i in range(len(__A ) ):
for j in range(len(__A ) ):
if board[i][j] == 1:
print("""Q""" , end=""" """ )
else:
print(""".""" , end=""" """ )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase : int =8
lowerCAmelCase : Union[str, Any] =[[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 15 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase : Optional[int] =re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowerCAmelCase : str =10
lowerCAmelCase : Optional[Any] =256
def A__ ( __A ):
'''simple docstring'''
if len(__A ) < MIN_NUM_TOKENS:
return None
_lowerCamelCase : Union[str, Any] = MinHash(num_perm=__A )
for token in set(__A ):
min_hash.update(token.encode() )
return min_hash
def A__ ( __A ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(__A ) if len(t.strip() ) > 0}
class __snake_case :
'''simple docstring'''
    def __init__( self : Tuple , *, _UpperCamelCase : float = 0.8_5 , ) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = duplication_jaccard_threshold
_lowerCamelCase : List[str] = NUM_PERM
_lowerCamelCase : Any = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
_lowerCamelCase : Tuple = defaultdict(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : MinHash) ->None:
"""simple docstring"""
_lowerCamelCase : List[str] = self._index.query(_UpperCamelCase)
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""")
return
self._index.insert(_UpperCamelCase , _UpperCamelCase)
if len(_UpperCamelCase) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_UpperCamelCase)
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int) ->List[List[Dict]]:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCamelCase : Optional[int] = [base] + list(_UpperCamelCase)
# reformat the cluster to be a list of dict
_lowerCamelCase : Dict = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(_UpperCamelCase)
return duplicate_clusters
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Union[str, Any]) ->None:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_duplicate_clusters()
with open(_UpperCamelCase , """w""") as f:
json.dump(_UpperCamelCase , _UpperCamelCase)
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = element
_lowerCamelCase : Optional[Any] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def A__ ( __A ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : List[Any] = DuplicationIndex(duplication_jaccard_threshold=__A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A ) ) , max_queue_size=100 ) ):
di.add(__A , __A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Any = get_tokens(__A )
_lowerCamelCase : Any = get_tokens(__A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCAmelCase : Optional[int] =None
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Any = []
for elementa in cluster:
_lowerCamelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCamelCase : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(__A , __A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCamelCase : str = 1
extremes.append(__A )
return extremes
def A__ ( __A , __A , __A ):
'''simple docstring'''
global _shared_dataset
_lowerCamelCase : int = dataset
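    # the dataset is shared through a module-level global so each pool worker avoids re-pickling it per task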
_lowerCamelCase : str = []
_lowerCamelCase : Optional[Any] = partial(_find_cluster_extremes_shared , jaccard_threshold=__A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A ) , ):
extremes_list.append(__A )
return extremes_list
def A__ ( __A , __A = 0.85 ):
'''simple docstring'''
_lowerCamelCase : Tuple = make_duplicate_clusters(__A , __A )
_lowerCamelCase : Optional[int] = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Optional[int] = find_extremes(__A , __A , __A )
for extremes in extremes_clusters:
for element in extremes:
_lowerCamelCase : Optional[Any] = element
_lowerCamelCase : Any = duplicate_indices - set(extreme_dict.keys() )
_lowerCamelCase : List[Any] = dataset.filter(lambda __A , __A : idx not in remove_indices , with_indices=__A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCamelCase : str = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCamelCase : List[str] = extreme_dict[element["""base_index"""]]["""copies"""]
print(F"""Original dataset size: {len(__A )}""" )
print(F"""Number of duplicate clusters: {len(__A )}""" )
print(F"""Files in duplicate cluster: {len(__A )}""" )
print(F"""Unique files in duplicate cluster: {len(__A )}""" )
print(F"""Filtered dataset size: {len(__A )}""" )
return ds_filter, duplicate_clusters
| 706 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase : int ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A__ ( __A , __A , __A , __A=None ):
'''simple docstring'''
# Initialise PyTorch model
_lowerCamelCase : Tuple = XLNetConfig.from_json_file(__A )
_lowerCamelCase : List[Any] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_lowerCamelCase : int = finetuning_task
_lowerCamelCase : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCamelCase : int = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
_lowerCamelCase : Dict = finetuning_task
_lowerCamelCase : Optional[Any] = XLNetForQuestionAnswering(__A )
else:
_lowerCamelCase : Any = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A )
# Save pytorch-model
_lowerCamelCase : Optional[Any] = os.path.join(__A , __A )
_lowerCamelCase : Any = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowerCAmelCase : Union[str, Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 15 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A__ ( __A , __A=False ):
try:
_lowerCamelCase : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_lowerCamelCase : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
_lowerCamelCase : List[str] = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
lowerCAmelCase : str =parse_flag_from_env("RUN_SLOW", default=False)
def A__ ( __A ):
return unittest.skip("""Test was skipped""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(__A )
def A__ ( __A=None , __A=None ):
if test_case is None:
return partial(__A , version=__A )
return unittest.skipUnless(is_torch_version(""">=""" , __A ) , F"""test requires torch version >= {version}""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(__A )
def A__ ( __A ):
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(__A )
lowerCAmelCase : Union[str, Any] =(
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A__ ( __A ):
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(__A )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = True
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = tempfile.mkdtemp()
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int) ->List[Any]:
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob("""**/*"""):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[mock.Mock, List[mock.Mock]]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A__ ( __A ):
_lowerCamelCase : Tuple = AcceleratorState()
_lowerCamelCase : Tuple = tensor[None].clone().to(state.device )
_lowerCamelCase : int = gather(__A ).cpu()
_lowerCamelCase : Optional[int] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __A ):
return False
return True
class __snake_case :
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = returncode
_lowerCamelCase : Union[str, Any] = stdout
_lowerCamelCase : List[Any] = stderr
async def A__ ( __A , __A ):
while True:
_lowerCamelCase : List[str] = await stream.readline()
if line:
callback(__A )
else:
break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="""stderr:""" ) ) ),
        ] , timeout=timeout , )

    return _RunOutput(await p.wait() , out , err )
def A__ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )

    return result
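# A hedged example of driving the synchronous wrapper above; the command is
# illustrative and `A__` is this file's obfuscated name for the wrapper:
#
#   result = A__(["python", "-c", "print('hello')"], timeout=30, echo=False)
#   assert result.returncode == 0
#   assert "hello" in result.stdout[0]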
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
pass
def A__ ( command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 707 | def A__ ( input_str ):
'''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
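    # hedged sanity checks for the bitmap-based uniqueness test above:
    print(A__("""abc"""))  # expected: True  -- all characters are distinct
    print(A__("""aba"""))  # expected: False -- 'a' occurs twice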
| 15 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
_lowerCamelCase : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : str = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
# The dog is cute and lives in the garden house
_lowerCamelCase : str = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : int = model(_UpperCamelCase)["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _UpperCamelCase)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCamelCase , atol=1E-3))
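        # Note: both integration checks above compare only the last embedding
        # dimension, output[:, :, -1], against reference values that were
        # originally exported from fairseq's `xlmr.extract_features`, with an
        # absolute tolerance of 1e-3.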
| 15 | 0 |
import doctest
from collections import deque
import numpy as np
class __snake_case :
'''simple docstring'''
def __init__( self : Any) ->None:
"""simple docstring"""
_lowerCamelCase : int = [2, 1, 2, -1]
_lowerCamelCase : Union[str, Any] = [1, 2, 3, 4]
def _SCREAMING_SNAKE_CASE ( self : int) ->list[float]:
"""simple docstring"""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal , length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix) , np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i , 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
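    # hedged sanity check: the circular convolution of the default signals
    # [2, 1, 2, -1] and [1, 2, 3, 4] should come out as [10.0, 10.0, 6.0, 14.0]
    print(__snake_case()._SCREAMING_SNAKE_CASE())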
| 709 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Tuple =logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['pixel_values']
def __init__( self : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Union[int, float] = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , **_UpperCamelCase : str , ) ->None:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_lowerCamelCase : Optional[Any] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCamelCase : Any = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[int] = resample
_lowerCamelCase : int = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : Union[str, Any] = do_rescale
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PIL.Image.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
_UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[str] , ) ->np.ndarray:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = get_size_dict(_UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(_UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->str:
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Tuple=None , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) ->PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[str] = resample if resample is not None else self.resample
_lowerCamelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Optional[int] = get_size_dict(_UpperCamelCase)
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Dict = get_size_dict(_UpperCamelCase , param_name="""crop_size""")
_lowerCamelCase : int = make_list_of_images(_UpperCamelCase)
if not valid_images(_UpperCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(_UpperCamelCase) for image in images]
if do_resize:
_lowerCamelCase : Any = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase) for image in images]
if do_center_crop:
_lowerCamelCase : str = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase) for image in images]
if do_normalize:
_lowerCamelCase : List[str] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase) for image in images]
_lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase)
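        # A hedged end-to-end sketch of this processor (class name is the
        # obfuscated `__snake_case`; the inputs are illustrative):
        #
        #   processor = __snake_case(do_resize=True, do_normalize=True)
        #   batch = processor(images=[pil_image], return_tensors="pt")
        #   batch["pixel_values"].shape  # -> (1, 3, crop_height, crop_width)
        #
        # `preprocess` resizes to `size`, center-crops to `crop_size`, rescales
        # by `rescale_factor`, then normalizes with `image_mean`/`image_std`.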
| 15 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
        _lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
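# A hedged export sketch using the OnnxConfig above (API names follow
# transformers.onnx as I recall them; treat them as assumptions):
#
#   from transformers.onnx import export
#   onnx_config = <the OnnxConfig subclass above>(model.config)
#   export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("swin.onnx"))
#
# `atol_for_validation` (1e-4 here) is the tolerance used when the exported
# graph's outputs are compared against the PyTorch model's outputs.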
| 710 | from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : float) ->float:
"""simple docstring"""
return 0.0
def get_bounds( fft_results , samplerate ):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def A__ ( filter_type , samplerate ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )

    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )

    plt.plot(fft_db )
    plt.show()
def A__ ( filter_type , samplerate ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
| 15 | 0 |
lowerCAmelCase : str =[
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def A__ ( roman ):
    '''simple docstring'''
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def A__ ( number ):
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
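    # hedged spot checks -- at this point the name `A__` is bound to the
    # int-to-roman converter defined second:
    print(A__(3456))  # expected: 'MMMCDLVI'
    print(A__(2021))  # expected: 'MMXXI'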
| 711 | import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase : Tuple =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path , output_path , opset , fp16 = False ):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    output_path = Path(output_path )

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 15 | 0 |
import numpy as np
import qiskit
def bbaa( key_len = 8 , seed = None ):
    '''simple docstring'''
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
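    # with a fixed seed the simulation is reproducible, so the same call
    # returns the same key (a cheap determinism check):
    assert bbaa(8, seed=0) == bbaa(8, seed=0)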
| 712 | from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase : List[Any] =300 # TEMPERATURE (unit = K)
def A__ ( donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
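    # The expression above computes the built-in potential of a p-n junction:
    #   V_bi = (k_B * T / q) * ln(N_D * N_A / n_i**2)
    # hedged example with illustrative doping levels (result is roughly 0.83 V):
    print(A__(donor_conc=1E17, acceptor_conc=1E17, intrinsic_conc=1E10))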
| 15 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'data2vec-audio'
def __init__( self : Optional[int] , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : Optional[Any]=768 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Optional[int]=12 , _UpperCamelCase : List[Any]=3072 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : str=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : int=1E-5 , _UpperCamelCase : str="gelu" , _UpperCamelCase : List[str]=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : List[str]=19 , _UpperCamelCase : List[Any]=5 , _UpperCamelCase : str=0.0_5 , _UpperCamelCase : Union[str, Any]=10 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : str=10 , _UpperCamelCase : Any=0 , _UpperCamelCase : Any="sum" , _UpperCamelCase : str=False , _UpperCamelCase : List[Any]=False , _UpperCamelCase : List[Any]=256 , _UpperCamelCase : List[str]=(512, 512, 512, 512, 1500) , _UpperCamelCase : Optional[Any]=(5, 3, 3, 1, 1) , _UpperCamelCase : Optional[int]=(1, 2, 3, 1, 1) , _UpperCamelCase : List[str]=512 , _UpperCamelCase : Tuple=0 , _UpperCamelCase : int=1 , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : str=False , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : str=None , **_UpperCamelCase : List[Any] , ) ->str:
"""simple docstring"""
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : List[str] = feat_extract_activation
_lowerCamelCase : Union[str, Any] = list(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = list(_UpperCamelCase)
_lowerCamelCase : Dict = list(_UpperCamelCase)
_lowerCamelCase : int = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[str] = num_conv_pos_embedding_groups
_lowerCamelCase : int = conv_pos_kernel_size
_lowerCamelCase : Dict = len(self.conv_dim)
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_dropout
_lowerCamelCase : List[str] = attention_dropout
_lowerCamelCase : Dict = activation_dropout
_lowerCamelCase : Tuple = feat_proj_dropout
_lowerCamelCase : Optional[Any] = final_dropout
_lowerCamelCase : str = layerdrop
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Union[str, Any] = vocab_size
_lowerCamelCase : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : Any = mask_time_length
_lowerCamelCase : str = mask_time_min_masks
_lowerCamelCase : Optional[int] = mask_feature_prob
_lowerCamelCase : str = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : int = ctc_loss_reduction
_lowerCamelCase : Union[str, Any] = ctc_zero_infinity
# adapter
_lowerCamelCase : List[str] = add_adapter
_lowerCamelCase : Dict = adapter_kernel_size
_lowerCamelCase : str = adapter_stride
_lowerCamelCase : Union[str, Any] = num_adapter_layers
_lowerCamelCase : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(_UpperCamelCase)
_lowerCamelCase : List[Any] = list(_UpperCamelCase)
_lowerCamelCase : Optional[int] = list(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
"""simple docstring"""
return math.prod(self.conv_stride)
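        # For the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this property
        # evaluates to 5 * 2**6 = 320: one output frame per 320 input samples.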
| 713 | import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
def mf_knapsack( i , wt , val , j ):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack( w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
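# The table above is filled with the classic 0/1-knapsack recurrence:
#   dp[i][w] = max(val[i-1] + dp[i-1][w - wt[i-1]], dp[i-1][w])  if wt[i-1] <= w
#   dp[i][w] = dp[i-1][w]                                        otherwise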
def knapsack_with_example_solution( w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )

    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )

    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set : set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )

    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 714 | import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = IFPipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {'latents'}
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
"""simple docstring"""
return self._get_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=0) ->Optional[Any]:
"""simple docstring"""
if str(_UpperCamelCase).startswith("""mps"""):
_lowerCamelCase : int = torch.manual_seed(_UpperCamelCase)
else:
_lowerCamelCase : List[Any] = torch.Generator(device=_UpperCamelCase).manual_seed(_UpperCamelCase)
_lowerCamelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa)
_lowerCamelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""")
_lowerCamelCase , _lowerCamelCase : str = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""")
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCamelCase : str = None
_lowerCamelCase : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCamelCase : Optional[Any] = IFImgaImgPipeline(**pipe_a.components)
_lowerCamelCase : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCamelCase : Any = IFInpaintingPipeline(**pipe_a.components)
_lowerCamelCase : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str) ->Tuple:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : str = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple) ->Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : int = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Any = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , num_inference_steps=2 , generator=_UpperCamelCase , output_type="""np""" , )
_lowerCamelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCamelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
# pipeline 2
_start_torch_memory_measurement()
_lowerCamelCase : Tuple = torch.Generator(device="""cpu""").manual_seed(0)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_UpperCamelCase)
_lowerCamelCase : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_UpperCamelCase)
_lowerCamelCase : List[str] = pipe_a(
prompt_embeds=_UpperCamelCase , negative_prompt_embeds=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , original_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type="""np""" , )
_lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCamelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""")
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase)
def A__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 15 | 0 |
from __future__ import annotations
def A__ ( value , weight , capacity ):
    '''simple docstring'''
    index = list(range(len(weight ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )

    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
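    # hedged example: value = [60, 100, 120], weight = [10, 20, 30], capacity = 50
    # greedily takes items 0 and 1 in full plus 2/3 of item 2, for a value of 240.0
    print(A__([60, 100, 120], [10, 20, 30], 50))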
| 715 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
| 15 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
_lowerCamelCase : Any = Text(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
if self.streaming:
_lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
_lowerCamelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
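# A hedged usage sketch of the text reader above (class and method names are
# this file's obfuscated bindings; the data path is illustrative):
#
#   reader = __snake_case("train.txt", keep_in_memory=True)
#   ds = reader._SCREAMING_SNAKE_CASE()  # downloads/prepares and returns the dataset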
| 716 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : Any = torch.manual_seed(0)
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
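# A minimal, runnable sketch of the denoising loop the tests above exercise,
# assuming only the public diffusers EulerDiscreteScheduler API; a zero
# tensor stands in for a real UNet's noise prediction.
import torch
from diffusers import EulerDiscreteScheduler

def euler_sampling_sketch():
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample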
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS =[int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
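# Spot check of the word-value rule used above: "SKY" scores
# 19 + 11 + 25 = 55, and 55 is the 10th triangular number, so it counts.
assert sum(ord(c) - 64 for c in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS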
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Dict ={"vocab_file": "vocab.json"}
lowerCAmelCase : List[str] ={
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCAmelCase : int ={"mgp-str": 27}
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int="[GO]" , _UpperCamelCase : Any="[GO]" , _UpperCamelCase : Optional[Any]="[s]" , _UpperCamelCase : List[str]="[GO]" , **_UpperCamelCase : Dict) ->Union[str, Any]:
"""simple docstring"""
super().__init__(
unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : Optional[Any] = json.load(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : str) ->Any:
"""simple docstring"""
return len(self.vocab)
def _SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Union[str, Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(_UpperCamelCase)
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : int) ->Optional[int]:
"""simple docstring"""
return self.vocab.get(_UpperCamelCase , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[Any]) ->Dict:
"""simple docstring"""
return self.decoder.get(_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCamelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(_UpperCamelCase))
return
_lowerCamelCase : Tuple = os.path.join(
_UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
with open(_UpperCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase) + """\n""")
return (vocab_file,)
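# Round-trip sketch with a throwaway vocab, assuming the released
# MgpstrTokenizer that the class above mirrors: one token per character,
# ids given by vocab order (the exact ids below rest on that assumption).
import tempfile
from transformers import MgpstrTokenizer

with tempfile.TemporaryDirectory() as tmp_dir:
    tmp_vocab = os.path.join(tmp_dir, "vocab.json")
    with open(tmp_vocab, "w", encoding="utf-8") as f:
        json.dump({tok: i for i, tok in enumerate(["[GO]", "[s]", "a", "b", "1"])}, f)
    demo_tokenizer = MgpstrTokenizer(tmp_vocab)
    assert demo_tokenizer("ab1")["input_ids"] == [2, 3, 4]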
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *_UpperCamelCase : List[str] , **_UpperCamelCase : List[Any]) ->Dict:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Any) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Any , **_UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[Any]) ->int:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *_UpperCamelCase : List[str] , **_UpperCamelCase : Optional[int]) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any , *_UpperCamelCase : str , **_UpperCamelCase : Any) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , *_UpperCamelCase : List[str] , **_UpperCamelCase : Dict) ->Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[str]) ->str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Dict) ->Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=__lowerCAmelCase ):
'''simple docstring'''
_snake_case = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_UpperCamelCase : Any , **_UpperCamelCase : str) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , *_UpperCamelCase : Dict , **_UpperCamelCase : int) ->int:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
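# Every class above follows one guard pattern: a placeholder whose
# constructor (and loader classmethods) immediately raises if the optional
# backends are missing. A self-contained sketch of the same idea; the
# class name here is illustrative, not a diffusers object:
class _RequiresBackends(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends: torch, transformers, onnx")

class FakeOnnxPipeline(metaclass=_RequiresBackends):
    pass

try:
    FakeOnnxPipeline()
except ImportError as exc:
    assert "onnx" in str(exc)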
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""c"""])
self.assertEqual(_UpperCamelCase , [2])
# Out indices set to match out features
_lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices(["""a""", """c"""] , _UpperCamelCase , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features set to match out indices
_lowerCamelCase , _lowerCamelCase : Tuple = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [0, 2])
# Out features selected from negative indices
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase)
self.assertEqual(_UpperCamelCase , ["""a""", """c"""])
self.assertEqual(_UpperCamelCase , [-3, -1])
def _SCREAMING_SNAKE_CASE ( self : int) ->int:
"""simple docstring"""
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _UpperCamelCase)
# Out features must be a list
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , 0 , ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(_UpperCamelCase):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = BackboneMixin()
_lowerCamelCase : Union[str, Any] = ["""a""", """b""", """c"""]
_lowerCamelCase : Tuple = ["""a""", """c"""]
_lowerCamelCase : List[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
_lowerCamelCase : str = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""])
self.assertEqual(backbone.out_indices , [0, 1])
_lowerCamelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""])
self.assertEqual(backbone.out_indices , [-3, -1])
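# The alignment rule these tests pin down, restated as a tiny standalone
# function (a sketch, not the transformers implementation):
def align_sketch(stage_names, out_features=None, out_indices=None):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)

assert align_sketch(["a", "b", "c"]) == (["c"], [2])
assert align_sketch(["a", "b", "c"], out_indices=[-3, -1]) == (["a", "c"], [-3, -1])
assert align_sketch(["a", "b", "c"], out_features=["a", "c"]) == (["a", "c"], [0, 2])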
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : int =logging.get_logger(__name__)
def A__ ( __A ):
'''simple docstring'''
# initialize config
if "resnet-50" in model_name:
_lowerCamelCase : List[Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
_lowerCamelCase : str = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
        raise ValueError("""Model name should include either resnet-50 or resnet-101""" )
_lowerCamelCase : Optional[Any] = DetrConfig(use_timm_backbone=__A , backbone_config=__A )
# set label attributes
_lowerCamelCase : Dict = """panoptic""" in model_name
if is_panoptic:
_lowerCamelCase : Any = 250
else:
_lowerCamelCase : List[str] = 91
_lowerCamelCase : Optional[int] = """huggingface/label-files"""
_lowerCamelCase : int = """coco-detection-id2label.json"""
_lowerCamelCase : int = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
    _lowerCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : str = idalabel
_lowerCamelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = state_dict.pop(__A )
_lowerCamelCase : Dict = val
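# rename_key above is the pop-and-reinsert primitive the whole conversion
# relies on; a toy dry run of the intended behavior (sketch):
def _rename_key_sketch():
    sd = {"backbone.0.body.conv1.weight": 0}
    sd["backbone.conv_encoder.model.embedder.embedder.convolution.weight"] = sd.pop(
        "backbone.0.body.conv1.weight")
    assert "backbone.0.body.conv1.weight" not in sd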
def A__ ( __A , __A=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = """"""
if is_panoptic:
_lowerCamelCase : Tuple = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : List[str] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[:256, :]
_lowerCamelCase : int = in_proj_bias[:256]
_lowerCamelCase : Optional[Any] = in_proj_weight[256:512, :]
_lowerCamelCase : Tuple = in_proj_bias[256:512]
_lowerCamelCase : List[Any] = in_proj_weight[-256:, :]
_lowerCamelCase : Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCamelCase : Tuple = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Tuple = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[:256, :]
_lowerCamelCase : List[str] = in_proj_bias[:256]
_lowerCamelCase : Dict = in_proj_weight[256:512, :]
_lowerCamelCase : Union[str, Any] = in_proj_bias[256:512]
_lowerCamelCase : Optional[Any] = in_proj_weight[-256:, :]
_lowerCamelCase : Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCamelCase : Union[str, Any] = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_lowerCamelCase : Any = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCamelCase : int = in_proj_weight_cross_attn[:256, :]
_lowerCamelCase : Any = in_proj_bias_cross_attn[:256]
_lowerCamelCase : List[str] = in_proj_weight_cross_attn[256:512, :]
_lowerCamelCase : Any = in_proj_bias_cross_attn[256:512]
_lowerCamelCase : Any = in_proj_weight_cross_attn[-256:, :]
_lowerCamelCase : Optional[int] = in_proj_bias_cross_attn[-256:]
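# The slicing above is the standard conversion trick: PyTorch's fused
# attention projection stacks q/k/v in one (3*d, d) matrix, which splits
# back into three (d, d) blocks. Sketch:
def _qkv_split_sketch(d=256):
    in_proj = torch.randn(3 * d, d)
    q_w, k_w, v_w = in_proj[:d], in_proj[d : 2 * d], in_proj[-d:]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)
    return q_w, k_w, v_w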
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCamelCase : str = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A__ ( __A , __A=None , __A=False ):
'''simple docstring'''
_lowerCamelCase : List[str] = get_detr_config(__A )
# load original model from torch hub
_lowerCamelCase : Any = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F"""Converting model {model_name}...""" )
_lowerCamelCase : Optional[int] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=__A ).eval()
_lowerCamelCase : Optional[Any] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__A ):
if is_panoptic:
_lowerCamelCase : Optional[int] = """detr.""" + src
rename_key(__A , __A , __A )
# query, key and value matrices need special treatment
read_in_q_k_v(__A , is_panoptic=__A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : str = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_lowerCamelCase : Tuple = state_dict.pop(__A )
_lowerCamelCase : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : int = state_dict.pop(__A )
_lowerCamelCase : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_lowerCamelCase : List[str] = state_dict.pop(__A )
_lowerCamelCase : Optional[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_lowerCamelCase : Any = state_dict.pop(__A )
_lowerCamelCase : Any = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : List[str] = DetrForSegmentation(__A ) if is_panoptic else DetrForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
# verify our conversion on an image
_lowerCamelCase : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
_lowerCamelCase : List[Any] = DetrImageProcessor(format=__A )
_lowerCamelCase : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" )
_lowerCamelCase : Optional[int] = encoding["""pixel_values"""]
_lowerCamelCase : List[Any] = detr(__A )
_lowerCamelCase : Tuple = model(__A )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
lowerCAmelCase : int =parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import math
def is_prime( number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
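# Worked examples of the search above: starting from 14, the first prime
# walking up is 17; walking down (desc=True) it is 13; factor scales the
# starting value before the walk begins.
assert next_prime(14) == 17
assert next_prime(14, desc=True) == 13
assert next_prime(7, factor=2) == 17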
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
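# Dry run of the rename on a toy dict: transformers' GPT-2 ties
# lm_head.weight to the input embeddings, so the decoder-prefixed key in
# the original checkpoint has to be renamed before loading.
toy = {OLD_KEY: "W", "transformer.wte.weight": "E"}
toy[NEW_KEY] = toy.pop(OLD_KEY)
assert NEW_KEY in toy and OLD_KEY not in toy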
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *_UpperCamelCase : int , **_UpperCamelCase : List[str]) ->Tuple:
"""simple docstring"""
super().__init__(*_UpperCamelCase , **_UpperCamelCase)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Dict , _UpperCamelCase : List[str]=None) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : str = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[int] , _UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCamelCase : Optional[int]) ->Dict:
"""simple docstring"""
return super().__call__(_UpperCamelCase , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Tuple = load_image(_UpperCamelCase)
_lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Any = self.model(**_UpperCamelCase)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=5) ->str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1)[0]
_lowerCamelCase , _lowerCamelCase : Dict = probs.topk(_UpperCamelCase)
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1)[0]
_lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase)
_lowerCamelCase , _lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase)]
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int=0.0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "geglu" , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : str = "layer_norm" , _UpperCamelCase : bool = False , ) ->Tuple:
"""simple docstring"""
super().__init__()
_lowerCamelCase : int = only_cross_attention
_lowerCamelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
_lowerCamelCase : str = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""")
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCamelCase : Any = AdaLayerNorm(_UpperCamelCase , _UpperCamelCase)
elif self.use_ada_layer_norm_zero:
_lowerCamelCase : int = AdaLayerNormZero(_UpperCamelCase , _UpperCamelCase)
else:
_lowerCamelCase : Dict = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase)
_lowerCamelCase : Tuple = Attention(
query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_UpperCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowerCamelCase : Tuple = (
AdaLayerNorm(_UpperCamelCase , _UpperCamelCase)
if self.use_ada_layer_norm
else nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase)
)
_lowerCamelCase : Dict = Attention(
query_dim=_UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , upcast_attention=_UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
# 3. Feed-forward
_lowerCamelCase : Dict = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase)
_lowerCamelCase : List[str] = FeedForward(_UpperCamelCase , dropout=_UpperCamelCase , activation_fn=_UpperCamelCase , final_dropout=_UpperCamelCase)
# let chunk size default to None
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Optional[Any] = 0
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : int) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = chunk_size
_lowerCamelCase : List[Any] = dim
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , _UpperCamelCase : Dict[str, Any] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , ) ->Optional[int]:
"""simple docstring"""
if self.use_ada_layer_norm:
_lowerCamelCase : List[Any] = self.norma(_UpperCamelCase , _UpperCamelCase)
elif self.use_ada_layer_norm_zero:
_lowerCamelCase : str = self.norma(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hidden_dtype=hidden_states.dtype)
else:
_lowerCamelCase : List[str] = self.norma(_UpperCamelCase)
_lowerCamelCase : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCamelCase : List[str] = self.attna(
_UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
if self.use_ada_layer_norm_zero:
_lowerCamelCase : List[str] = gate_msa.unsqueeze(1) * attn_output
_lowerCamelCase : Optional[Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCamelCase : List[Any] = (
self.norma(_UpperCamelCase , _UpperCamelCase) if self.use_ada_layer_norm else self.norma(_UpperCamelCase)
)
_lowerCamelCase : List[Any] = self.attna(
_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
_lowerCamelCase : Tuple = attn_output + hidden_states
# 3. Feed-forward
_lowerCamelCase : List[str] = self.norma(_UpperCamelCase)
if self.use_ada_layer_norm_zero:
_lowerCamelCase : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""")
_lowerCamelCase : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCamelCase : List[str] = torch.cat(
[self.ff(_UpperCamelCase) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase , dim=self._chunk_dim)] , dim=self._chunk_dim , )
else:
_lowerCamelCase : List[Any] = self.ff(_UpperCamelCase)
if self.use_ada_layer_norm_zero:
_lowerCamelCase : str = gate_mlp.unsqueeze(1) * ff_output
_lowerCamelCase : int = ff_output + hidden_states
return hidden_states
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : str = "geglu" , _UpperCamelCase : bool = False , ) ->int:
"""simple docstring"""
super().__init__()
_lowerCamelCase : List[str] = int(dim * mult)
_lowerCamelCase : Optional[int] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCamelCase : int = GELU(_UpperCamelCase , _UpperCamelCase)
if activation_fn == "gelu-approximate":
_lowerCamelCase : Any = GELU(_UpperCamelCase , _UpperCamelCase , approximate="""tanh""")
elif activation_fn == "geglu":
_lowerCamelCase : Union[str, Any] = GEGLU(_UpperCamelCase , _UpperCamelCase)
elif activation_fn == "geglu-approximate":
_lowerCamelCase : Optional[Any] = ApproximateGELU(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : int = nn.ModuleList([])
# project in
self.net.append(_UpperCamelCase)
# project dropout
self.net.append(nn.Dropout(_UpperCamelCase))
# project out
self.net.append(nn.Linear(_UpperCamelCase , _UpperCamelCase))
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_UpperCamelCase))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : Tuple) ->Optional[Any]:
"""simple docstring"""
for module in self.net:
_lowerCamelCase : Optional[int] = module(_UpperCamelCase)
return hidden_states
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str = "none") ->Any:
"""simple docstring"""
super().__init__()
_lowerCamelCase : int = nn.Linear(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Optional[Any] = approximate
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Optional[int]) ->List[str]:
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase , approximate=self.approximate)
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32) , approximate=self.approximate).to(dtype=gate.dtype)
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.proj(_UpperCamelCase)
_lowerCamelCase : List[str] = self.gelu(_UpperCamelCase)
return hidden_states
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int) ->Any:
"""simple docstring"""
super().__init__()
_lowerCamelCase : Tuple = nn.Linear(_UpperCamelCase , dim_out * 2)
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : int) ->List[str]:
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase)
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str) ->int:
"""simple docstring"""
_lowerCamelCase : Dict = self.proj(_UpperCamelCase).chunk(2 , dim=-1)
return hidden_states * self.gelu(_UpperCamelCase)
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int) ->List[str]:
"""simple docstring"""
super().__init__()
_lowerCamelCase : Any = nn.Linear(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Tuple = self.proj(_UpperCamelCase)
return x * torch.sigmoid(1.7_0_2 * x)
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict) ->Any:
"""simple docstring"""
super().__init__()
_lowerCamelCase : Dict = nn.Embedding(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = nn.SiLU()
_lowerCamelCase : Union[str, Any] = nn.Linear(_UpperCamelCase , embedding_dim * 2)
_lowerCamelCase : Optional[Any] = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.linear(self.silu(self.emb(_UpperCamelCase)))
_lowerCamelCase : Optional[int] = torch.chunk(_UpperCamelCase , 2)
_lowerCamelCase : Dict = self.norm(_UpperCamelCase) * (1 + scale) + shift
return x
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]) ->Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase : str = CombinedTimestepLabelEmbeddings(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[Any] = nn.SiLU()
_lowerCamelCase : Optional[Any] = nn.Linear(_UpperCamelCase , 6 * embedding_dim , bias=_UpperCamelCase)
_lowerCamelCase : Union[str, Any] = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase , eps=1E-6)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict=None) ->int:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase , _UpperCamelCase , hidden_dtype=_UpperCamelCase)))
_lowerCamelCase : int = emb.chunk(6 , dim=1)
_lowerCamelCase : List[Any] = self.norm(_UpperCamelCase) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : float = 1E-5) ->List[str]:
"""simple docstring"""
super().__init__()
_lowerCamelCase : int = num_groups
_lowerCamelCase : List[Any] = eps
if act_fn is None:
_lowerCamelCase : str = None
else:
_lowerCamelCase : List[Any] = get_activation(_UpperCamelCase)
_lowerCamelCase : Any = nn.Linear(_UpperCamelCase , out_dim * 2)
def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : Dict) ->Dict:
"""simple docstring"""
if self.act:
_lowerCamelCase : Optional[Any] = self.act(_UpperCamelCase)
_lowerCamelCase : Any = self.linear(_UpperCamelCase)
_lowerCamelCase : str = emb[:, :, None, None]
_lowerCamelCase : List[Any] = emb.chunk(2 , dim=1)
_lowerCamelCase : str = F.group_norm(_UpperCamelCase , self.num_groups , eps=self.eps)
_lowerCamelCase : List[Any] = x * (1 + scale) + shift
return x
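# Every Ada* norm above reduces to the same FiLM-style modulation:
# normalize, then apply a conditioning-derived (1 + scale) and shift.
# A bare-tensor sketch of that core step (dimensions are illustrative):
demo_x = torch.randn(2, 4, 8)                    # (batch, seq, dim)
demo_cond = torch.randn(2, 16)                   # conditioning embedding
demo_shift, demo_scale = nn.Linear(16, 2 * 8)(demo_cond).chunk(2, dim=1)
demo_out = F.layer_norm(demo_x, (8,)) * (1 + demo_scale[:, None, :]) + demo_shift[:, None, :]
assert demo_out.shape == demo_x.shape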
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
        _lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
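# batch_decode above consumes raw per-position logits; the greedy read of
# one such head is just an argmax over the vocab axis (sketch, using the
# same character-head shape as the test):
import torch

char_logits = torch.randn(1, 27, 38)               # (batch, max_len, charset)
pred_ids = char_logits.argmax(-1)                  # (1, 27) token ids
pred_scores = char_logits.softmax(-1).max(-1).values
assert pred_ids.shape == (1, 27) and pred_scores.shape == (1, 27)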
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
_lowerCAmelCase = ["""names""", """prefix"""]
_lowerCAmelCase = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
_lowerCAmelCase = ["""encoding_errors""", """on_bad_lines"""]
_lowerCAmelCase = ["""date_format"""]
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
_UpperCAmelCase = ","
_UpperCAmelCase = None
_UpperCAmelCase = "infer"
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = True
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = False
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = None
_UpperCAmelCase = "."
_UpperCAmelCase = None
_UpperCAmelCase = '"'
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = None
_UpperCAmelCase = 1_00_00
_UpperCAmelCase = None
_UpperCAmelCase = "strict"
_UpperCAmelCase = "error"
_UpperCAmelCase = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.delimiter is not None:
_lowerCAmelCase : str = self.delimiter
if self.column_names is not None:
_lowerCAmelCase : Dict = self.column_names
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,_A ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
_UpperCAmelCase = CsvConfig
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A ,(str, list, tuple) ):
_lowerCAmelCase : int = data_files
if isinstance(_A ,_A ):
_lowerCAmelCase : Optional[int] = [files]
_lowerCAmelCase : Dict = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_A ,_A ):
_lowerCAmelCase : List[Any] = [files]
_lowerCAmelCase : Dict = [dl_manager.iter_files(_A ) for file in files]
splits.append(datasets.SplitGenerator(name=_A ,gen_kwargs={'files': files} ) )
return splits
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.config.features is not None:
_lowerCAmelCase : Any = self.config.features.arrow_schema
if all(not require_storage_cast(_A ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCAmelCase : str = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=_A )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCAmelCase : List[str] = table_cast(_A ,_A )
return pa_table
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCAmelCase : Dict = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_A ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
_lowerCAmelCase : Any = pd.read_csv(_A ,iterator=_A ,dtype=_A ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_A ):
_lowerCAmelCase : Union[str, Any] = pa.Table.from_pandas(_A )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_A )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_A )}: {e}""" )
raise
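# Added usage sketch (not part of the original builder): the public entry point
# that dispatches to this ArrowBasedBuilder is `datasets.load_dataset("csv", ...)`.
# The toy file is written to a temp dir so the example is self-contained.
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        csv_path = os.path.join(tmp_dir, "toy.csv")
        with open(csv_path, "w", encoding="utf-8") as f:
            f.write("a,b\n1,x\n2,y\n")
        toy_ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
        assert toy_ds.column_names == ["a", "b"] and toy_ds.num_rows == 2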
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
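# Added note on the static helper above: it is spherical linear interpolation
# ("slerp", the diffusers name for this method) between two noise tensors,
#     slerp(x0, x1, alpha) = sin((1 - alpha) * theta) / sin(theta) * x0
#                          + sin(alpha * theta) / sin(theta) * x1,
# where theta = acos(<x0, x1> / (|x0| * |x1|)) is the angle between the
# flattened tensors. Unlike a straight lerp, this keeps intermediate noise at
# roughly the norm the diffusion model was trained on. Hypothetical use:
#     halfway = pipeline.slerp(noise_a, noise_b, 0.5)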
| 16 | 1 |
"""simple docstring"""
from math import factorial
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_lowerCAmelCase : Dict = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_lowerCAmelCase : Union[str, Any] = float(factorial(_lowerCamelCase ) )
coefficient /= factorial(_lowerCamelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
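# Worked check, added by hand to match the demo above:
#   binomial_distribution(2, 4, 0.75)
#     = C(4, 2) * 0.75**2 * 0.25**2
#     = 6 * 0.5625 * 0.0625
#     = 0.2109375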
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
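# Added illustration: for the symbol tuple ("l", "o", "w", "</w>") the helper
# above yields {("l", "o"), ("o", "w"), ("w", "</w>")}, i.e. the adjacent
# symbol pairs that the BPE loop below ranks and merges.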
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(F"""No merges file provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
                'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
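# Added self-contained check of the "@@ " detokenization step above (named
# `convert_tokens_to_string` in transformers; the BPE_TOKEN_VOCAB constant is "@@ "):
if __name__ == "__main__":
    bpe_tokens = ["hel@@", "lo", "wor@@", "ld"]
    joined = " ".join(bpe_tokens)
    assert "".join(joined.split("@@ ")) == "hello world"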
| 16 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
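# Added note: the layer above implements feature-wise linear modulation (FiLM).
# A linear projection of the conditioning embedding is split into (scale, shift)
# and the features are transformed as x * (1 + scale) + shift. Stand-alone
# sketch of the same computation (shapes are hypothetical):
if __name__ == "__main__":
    x = torch.randn(2, 8, 16)      # (batch, seq, d_model)
    cond = torch.randn(2, 1, 64)   # conditioning embedding (d_model * 4)
    proj = nn.Linear(64, 16 * 2, bias=False)
    scale, shift = torch.chunk(proj(cond), 2, -1)
    out = x * (1 + scale) + shift
    assert out.shape == x.shape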
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""MaskFormerFeatureExtractor"""]
_lowerCAmelCase = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowerCAmelCase = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
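# Added note: with the _LazyModule indirection above, importing the package is
# cheap; a heavy submodule such as `modeling_maskformer` is only loaded on
# first attribute access, e.g.
#     from transformers import MaskFormerForInstanceSegmentation  # triggers the lazy import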
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
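# Added usage sketch (assuming this mirrors `accelerate.logging.get_logger`;
# `main_process_only` and `in_order` are the kwargs popped by the adapter above):
#     from accelerate import Accelerator
#     from accelerate.logging import get_logger
#
#     accelerator = Accelerator()  # initializes PartialState
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("logged once, on the main process")
#     logger.info("logged on every rank, in rank order", main_process_only=False, in_order=True)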
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
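# Added worked example (hand-checked): solve([[2.0, 1.0], [1.0, 3.0]],
# [[5.0], [10.0]]) returns [[1.0], [3.0]], since 2*1 + 1*3 = 5 and
# 1*1 + 3*3 = 10.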
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
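# Added worked numbers for the cubic case from the Project Euler 101 statement
# (hand-checked): for u(n) = n**3 the first incorrect terms are OP(1, 2) = 1,
# OP(2, 3) = 15 (from OP(2, n) = 7n - 6) and OP(3, 4) = 58 (from
# OP(3, n) = 6n**2 - 11n + 6), summing to 74 as the problem states.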
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
if components is None:
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Dict = list(_A )
def __len__( self ):
'''simple docstring'''
return len(self.__components )
def __str__( self ):
'''simple docstring'''
return "(" + ",".join(map(_A ,self.__components ) ) + ")"
def __add__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self )
if size == len(_A ):
_lowerCAmelCase : List[Any] = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self )
if size == len(_A ):
_lowerCAmelCase : int = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self ,_A ):
'''simple docstring'''
...
@overload
def __mul__( self ,_A ):
'''simple docstring'''
...
def __mul__( self ,_A ):
'''simple docstring'''
if isinstance(_A ,(float, int) ):
_lowerCAmelCase : Tuple = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A ,_A ) and len(self ) == len(_A ):
_lowerCAmelCase : List[Any] = len(self )
_lowerCAmelCase : Union[str, Any] = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def __lowerCamelCase ( self ):
'''simple docstring'''
return Vector(self.__components )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if isinstance(_A ,_A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
_lowerCAmelCase : List[Any] = value
def __lowerCamelCase ( self ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
_lowerCAmelCase : Optional[int] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def __lowerCamelCase ( self ,_A ,_A = False ):
'''simple docstring'''
_lowerCAmelCase : int = self * other
_lowerCAmelCase : Tuple = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
return Vector([0] * dimension )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (isinstance(_lowerCamelCase , _lowerCamelCase ))
_lowerCAmelCase : Any = [0] * dimension
_lowerCAmelCase : Dict = 1
return Vector(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
and (isinstance(_lowerCamelCase , (int, float) ))
)
return x * scalar + y
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
random.seed(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = [random.randint(_lowerCamelCase , _lowerCamelCase ) for _ in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
class __UpperCamelCase :
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = matrix
_lowerCAmelCase : List[Any] = w
_lowerCAmelCase : Tuple = h
def __str__( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self ,_A ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : str = []
for i in range(self.__height ):
_lowerCAmelCase : str = [
self.__matrix[i][j] + other.component(_A ,_A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A ,self.__width ,self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self ,_A ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : str = []
for i in range(self.__height ):
_lowerCAmelCase : Tuple = [
self.__matrix[i][j] - other.component(_A ,_A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A ,self.__width ,self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self ,_A ):
'''simple docstring'''
...
@overload
def __mul__( self ,_A ):
'''simple docstring'''
...
def __mul__( self ,_A ):
'''simple docstring'''
if isinstance(_A ,_A ): # matrix-vector
if len(_A ) == self.__width:
_lowerCAmelCase : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
_lowerCAmelCase : Dict = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A ,sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A ,(int, float) ): # matrix-scalar
_lowerCAmelCase : str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A ,self.__width ,self.__height )
return None
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.__height
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.__width
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
_lowerCAmelCase : Union[str, Any] = value
else:
raise Exception('change_component: indices out of bounds' )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
_lowerCAmelCase : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
_lowerCAmelCase : int = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A ,self.__width - 1 ,self.__height - 1 ).determinant()
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A ,_A )
else:
raise Exception('Indices out of bounds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_lowerCAmelCase : List[Any] = [
self.__matrix[0][y] * self.cofactor(0 ,_A ) for y in range(self.__width )
]
return sum(_A )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list[float]] = [[0] * n for _ in range(_lowerCamelCase )]
return Matrix(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
random.seed(_lowerCamelCase )
_lowerCAmelCase : list[list[float]] = [
[random.randint(_lowerCamelCase , _lowerCamelCase ) for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )
]
return Matrix(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
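# Added self-contained check of the angle formula used by the vector class
# above (hand-verified): for x = (1, 0) and y = (1, 1), x.y = 1, |x| = 1 and
# |y| = sqrt(2), so the angle is acos(1 / sqrt(2)) = 45 degrees.
if __name__ == "__main__":
    num = 1 * 1 + 0 * 1
    den = math.sqrt(1**2 + 0**2) * math.sqrt(1**2 + 1**2)
    assert abs(math.degrees(math.acos(num / den)) - 45.0) < 1e-9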
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
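    # Example invocation, using the defaults declared above (all paths are
    # illustrative placeholders for local resources, not files shipped with
    # this script):
    #   python prepare_chinese_ref.py \
    #       --file_name ./resources/chinese-demo.txt \
    #       --ltp ./resources/ltp \
    #       --bert ./resources/robert \
    #       --save_path ./resources/ref.txt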
| 16 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
# Class name reconstructed as an assumption; attribute names follow the PipelineTool API.
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang)
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
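# A minimal usage sketch (an assumption about how the agent tooling drives
# this class; the NLLB checkpoint is downloaded on first use):
# tool = TranslationTool()
# print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))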
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
# Class and method names in this test module were reconstructed from the
# upstream diffusers test file; the originals were obfuscated.
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False  # attribute name is an assumption; the original was obfuscated
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )  # False/False assumed; the original booleans were obfuscated
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'), up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'), latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_text2img(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm_default_ddim(self):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm_default_ddim(self):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
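# The fast test above runs on CPU; the @slow/@nightly suites need a GPU and
# download real checkpoints. A typical invocation (illustrative path):
#   pytest tests/pipelines/latent_diffusion/test_latent_diffusion.py -k fast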
| 16 | 1 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the four helper names below were reconstructed from their bodies; the
# original (obfuscated) names are unknown, so treat them as assumptions.
def freeze_module(module):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    '''simple docstring'''
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.')
    return device
def show_image(img):
    '''simple docstring'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
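# A quick sketch of how these helpers compose (the function names above are
# reconstructions, see the note at the top of this module):
# device = get_device()
# print(f"[{get_timestamp()}] running on: {device}")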
| 16 |
"""simple docstring"""
import base64
def ascii85_encode(string):
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8'))
def ascii85_decode(a85encoded):
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode('utf-8')
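# Round-trip sketch: Ascii85 encoding is reversible, so
# ascii85_decode(ascii85_encode(s)) == s for any UTF-8 string s.
# assert ascii85_decode(ascii85_encode("some text")) == "some text"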
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""abeja/gpt-neox-japanese-2.7b""": 2_0_4_8,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        if not os.path.isfile(emoji_file):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.raw_vocab)
    def get_vocab(self):
        '''simple docstring'''
        return dict(self.raw_vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        '''simple docstring'''
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.subword_tokenizer.convert_id_to_token(index)
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = ''.join(tokens).strip()
        return out_string
    def _build_conversation_input_ids(self, conversation):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
    def __len__(self):
        '''simple docstring'''
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        '''simple docstring'''
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
    def tokenize(self, text, clean=False):
        '''simple docstring'''
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE2_8080 and c <= 0xE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline='\n'):
        '''simple docstring'''
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
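# A minimal usage sketch (requires the vocab/emoji files published with the
# abeja/gpt-neox-japanese-2.7b checkpoint; network access assumed):
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# print(tokenizer.tokenize("こんにちは 世界"))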
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
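# A minimal usage sketch (downloads the bert-base-uncased tokenizer files on
# first use):
# tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# print(tokenizer("hello world")["input_ids"])  # expected: [101, 7592, 2088, 102]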
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    '''simple docstring'''
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowerCAmelCase = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
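    # Example invocation for a fine-tuned CTC checkpoint (paths are
    # illustrative placeholders):
    #   python convert_wav2vec2_checkpoint.py \
    #       --checkpoint_path ./wav2vec_small_960h.pt \
    #       --dict_path ./dict.ltr.txt \
    #       --pytorch_dump_folder_path ./hf_wav2vec2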
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
# Class and test-method names reconstructed from the upstream accelerate test
# file; treat them as assumptions.
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        '''simple docstring'''
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
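
# Design note: this module doubles as its own torchrun payload. The
# `test_pad_across_processes` test launches `torchrun ... <this file>`, so the
# `__main__` block above runs once per rank, exercising `pad_across_processes`
# with a different tensor shape on every process and collecting mismatches into
# `error_msg` before raising a single ValueError at the end.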
| 16 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
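
# Example usage (sketch; the checkpoint name is illustrative):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# The returned BatchFeature carries `input_ids`/`attention_mask` for the language
# model, `qformer_input_ids`/`qformer_attention_mask` for the Q-Former, and
# `pixel_values` from the image processor.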
| 16 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int]
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly len(sequence) - index children, and the recursion
    terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
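
# Complexity note: the state space tree has n!/(n-k)! nodes at depth k, so the
# DFS prints all n! permutations of an n-element sequence in O(n * n!) time
# while keeping only O(n) extra state in `current_sequence` and `index_used`.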
| 16 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
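# Design note: the placeholder above uses the DummyObject metaclass so that
# `import transformers` succeeds even when sentencepiece is not installed,
# while instantiating any sentencepiece-backed class fails loudly through
# requires_backends. The real dummy_sentencepiece_objects module repeats this
# exact five-line pattern once per sentencepiece-dependent class
# (AlbertTokenizer, T5Tokenizer, XLMRobertaTokenizer, and so on).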
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter to assist with logging in multiprocess; `log` takes an extra `main_process_only` kwarg."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
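
# Example usage (sketch): the adapter requires the accelerate state to exist,
# so construct an Accelerator (or PartialState) before logging.
# from accelerate import Accelerator
# from accelerate.logging import get_logger
#
# accelerator = Accelerator()
# logger = get_logger(__name__)
# logger.info("printed once, on the main process only")
# logger.info("printed on every process", main_process_only=False)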
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
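
# Example (sketch): the defaults reproduce the original ~1.6B-parameter CTRL
# model (48 layers, d_model 1280, 246,534-token vocabulary); smaller variants
# only need the overridden fields.
# config = CTRLConfig()
# small_config = CTRLConfig(n_layer=6, n_embd=256, dff=1024)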
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs):
        if titles is None and texts is None:
            return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
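
# Example usage (sketch; the checkpoint is one of the identifiers mapped above):
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# Each row of encoded_inputs["input_ids"] is laid out as
# [CLS] question [SEP] title [SEP] passage, matching the docstring above.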
| 16 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type)
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
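
# Example inference sketch, mirroring the integration test above:
# processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
# model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
# inputs = processor(video[:8], return_tensors="pt")  # 8 frames of a video clip
# with torch.no_grad():
#     logits = model(**inputs).logits  # shape (1, 400) over the Kinetics-400 labels
# predicted_label = logits.argmax(-1).item()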
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"))
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
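
# Example usage (sketch), matching the slow test above: ~4 s of unconditional
# audio from the pretrained checkpoint.
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]  # shape (2, num_samples)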
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suffix_from_pretrained(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
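
# Sketch of the registration API exercised by test_new_model_registration above:
# AutoConfig.register("new-model", NewModelConfig)
# TFAutoModel.register(NewModelConfig, TFNewModel)
# model = TFAutoModel.from_config(NewModelConfig())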
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
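
    # Note on the config above (sketch of intent): "bh1" and "bh2" pick the two
    # B(h) variants of the UniPC corrector in the diffusers implementation
    # (approximately B(h)=h versus B(h)=expm1(h)); the tests below sweep both
    # together with solver orders 1-3.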
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none is passed in, so that
        # test_switch can exercise a scheduler round-tripped through other classes
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
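# --- Illustrative usage (not part of the test suite) ---
# A minimal sketch of the config round-trip and scheduler-swapping pattern the
# tests above exercise. It assumes the `diffusers` package is installed; the
# class names below are its public schedulers, but treat this as a sketch, not
# a verified script.
if __name__ == "__main__":
    import tempfile

    from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

    demo_scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    with tempfile.TemporaryDirectory() as tmpdirname:
        demo_scheduler.save_config(tmpdirname)  # writes scheduler_config.json
        reloaded = UniPCMultistepScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.num_train_timesteps == 1000
    # Any compatible scheduler can be rebuilt from the same config:
    swapped = DPMSolverMultistepScheduler.from_config(demo_scheduler.config)
    print(type(swapped).__name__)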
| 16 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __UpperCamelCase ( a__ ):
def __init__( self ,_A=0.0_1 ,_A=1000 ):
'''simple docstring'''
_lowerCAmelCase : Tuple = p_stop
_lowerCAmelCase : str = max_length
def __iter__( self ):
'''simple docstring'''
_lowerCAmelCase : str = 0
_lowerCAmelCase : List[Any] = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCAmelCase : Any = random.random() < self.p_stop
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ,_A ,_A ,_A=False ,_A=True ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [
BatchSamplerShard(_A ,2 ,_A ,split_batches=_A ,even_batches=_A )
for i in range(2 )
]
_lowerCAmelCase : Optional[int] = [list(_A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_A ) for shard in batch_sampler_shards] ,[len(_A ) for e in expected] )
self.assertListEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A ,_A )
_lowerCAmelCase : int = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A ,_A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_A ,_A )
_lowerCAmelCase : Dict = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
_lowerCAmelCase : Dict = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_A ,_A )
_lowerCAmelCase : int = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_lowerCAmelCase : List[str] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_A ,_A )
_lowerCAmelCase : str = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : int = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_A ,_A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
_lowerCAmelCase : List[Any] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : Tuple = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
_lowerCAmelCase : Optional[Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : Tuple = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
_lowerCAmelCase : int = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : Tuple = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : List[str] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
_lowerCAmelCase : List[str] = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Tuple = [[], []]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
_lowerCAmelCase : Dict = BatchSampler(range(24 ) ,batch_size=3 ,drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : Tuple = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
_lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
_lowerCAmelCase : str = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_lowerCAmelCase : Tuple = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
_lowerCAmelCase : List[Any] = BatchSampler(range(20 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : int = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
_lowerCAmelCase : str = BatchSampler(range(2 ) ,batch_size=3 ,drop_last=_A )
_lowerCAmelCase : str = [[], []]
self.check_batch_sampler_shards(_A ,_A ,even_batches=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
_lowerCAmelCase : str = BatchSampler(range(24 ) ,batch_size=4 ,drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : List[str] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : int = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
_lowerCAmelCase : Any = BatchSampler(range(21 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
# Check the shards when the dataset is very small.
_lowerCAmelCase : Dict = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
_lowerCAmelCase : Dict = BatchSampler(range(2 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : Tuple = [[], []]
self.check_batch_sampler_shards(_A ,_A ,split_batches=_A ,even_batches=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCAmelCase : Tuple = [BatchSamplerShard(_A ,2 ,_A ,even_batches=_A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) ,3 )
self.assertEqual(len(batch_sampler_shards[1] ) ,2 )
self.assertListEqual(list(batch_sampler_shards[0] ) ,[[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) ,[[3, 4], [9, 10, 11]] )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=False ,_A=2 ,_A=False ):
'''simple docstring'''
random.seed(_A )
_lowerCAmelCase : Tuple = list(_A )
_lowerCAmelCase : List[str] = [
IterableDatasetShard(
_A ,batch_size=_A ,drop_last=_A ,num_processes=_A ,process_index=_A ,split_batches=_A ,)
for i in range(_A )
]
_lowerCAmelCase : Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_A )
iterable_dataset_lists.append(list(_A ) )
_lowerCAmelCase : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCAmelCase : Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_A ) ,len(_A ) )
self.assertTrue(len(_A ) % shard_batch_size == 0 )
_lowerCAmelCase : Union[str, Any] = []
for idx in range(0 ,len(_A ) ,_A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_A ) < len(_A ):
reference += reference
self.assertListEqual(_A ,reference[: len(_A )] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 42
_lowerCAmelCase : Union[str, Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
# Edge case with a very small dataset
_lowerCAmelCase : Optional[int] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
self.check_iterable_dataset_shards(_A ,_A ,batch_size=4 ,drop_last=_A ,split_batches=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = BatchSampler(range(16 ) ,batch_size=4 ,drop_last=_A )
_lowerCAmelCase : int = SkipBatchSampler(_A ,2 )
self.assertListEqual(list(_A ) ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = SkipDataLoader(list(range(16 ) ) ,batch_size=4 ,skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DataLoader(list(range(16 ) ) ,batch_size=4 )
_lowerCAmelCase : int = skip_first_batches(_A ,num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] ,[[8, 9, 10, 11], [12, 13, 14, 15]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = DataLoaderShard(list(range(16 ) ) ,batch_size=4 )
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
Accelerator()
_lowerCAmelCase : Tuple = DataLoaderDispatcher(range(16 ) ,batch_size=4 )
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader ,idx == 3 )
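# --- Illustrative usage (not part of the test suite) ---
# A minimal sketch of resuming a DataLoader mid-epoch with
# `skip_first_batches`, the helper exercised above. It assumes `accelerate`
# is installed; the values are arbitrary.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    from accelerate.data_loader import skip_first_batches

    loader = DataLoader(list(range(16)), batch_size=4)
    resumed = skip_first_batches(loader, num_batches=2)
    print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]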
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
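# Illustrative mappings derived from the rules above (the input keys are
# invented examples, not taken from a real checkpoint):
#   replace_key("vqvae.bottleneck.level_blocks.0.k")  -> "vqvae.bottleneck.level_blocks.0.codebook"
#   replace_key("priors.0.y_emb.lyrics")              -> "priors.0.metadata_embedding.lyrics"
#   replace_key("priors.0.prior.x_out.weight")        -> "priors.0.prior.fc_proj_out.weight"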
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            _lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
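# Example invocation (illustrative; assumes this script is saved as
# convert_jukebox.py — note it downloads several GB of checkpoints):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted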
| 16 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
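# --- Illustrative FiLM conditioning (not part of the model code) ---
# A minimal, self-contained sketch of the feature-wise linear modulation the
# FiLM layer above applies: a conditioning embedding is projected to a scale
# and a shift, and the hidden states are transformed as x * (1 + scale) + shift.
# The shapes below are arbitrary.
if __name__ == "__main__":
    cond = torch.randn(2, 1, 512)               # conditioning embedding (d_model * 4 = 512 here)
    proj = nn.Linear(512, 2 * 128, bias=False)  # projects to a scale and a shift for 128 features
    scale, shift = torch.chunk(proj(cond), 2, dim=-1)
    x = torch.randn(2, 10, 128)                 # sequence of hidden states
    x = x * (1 + scale) + shift                 # FiLM-modulated output, shape (2, 10, 128)
    print(x.shape)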
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
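# --- Illustrative offline parsing (a sketch, not executed here) ---
# `extract_user_profile` slices the page's shared JSON out of a <script> tag.
# A minimal fake payload shows the slicing logic; the data below is invented:
#
#   fake = (
#       'window._sharedData = {"config": {}, "entry_data": {"ProfilePage": '
#       '[{"graphql": {"user": {"username": "github"}}}]}};'
#   )
#   tag = BeautifulSoup(f"<script>{fake}</script>", "html.parser").script
#   extract_user_profile(tag)  # -> {"username": "github"}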
| 16 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowerCAmelCase = get_logger(__name__)
_lowerCAmelCase = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class __UpperCamelCase :
@add_start_docstrings(_A )
def __call__( self ,_A ,_A ):
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __UpperCamelCase :
@add_start_docstrings(_A )
def __call__( self ,_A ,_A ):
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __UpperCamelCase ( a__ ):
@add_start_docstrings(_A )
def __call__( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
for processor in self:
_lowerCAmelCase : List[Any] = inspect.signature(processor.__call__ ).parameters
if len(_A ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
_lowerCAmelCase : Tuple = processor(_A ,_A ,_A ,**_A )
else:
_lowerCAmelCase : Optional[int] = processor(_A ,_A ,_A )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
if not isinstance(_A ,_A ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
_lowerCAmelCase : List[Any] = temperature
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = scores / self.temperature
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A = -float('Inf' ) ,_A = 1 ):
'''simple docstring'''
if not isinstance(_A ,_A ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(_A ,_A ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
_lowerCAmelCase : List[Any] = top_p
_lowerCAmelCase : str = filter_value
_lowerCAmelCase : Union[str, Any] = min_tokens_to_keep
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = lax.top_k(_A ,scores.shape[-1] )
_lowerCAmelCase : Union[str, Any] = jnp.full_like(_A ,self.filter_value )
_lowerCAmelCase : Optional[int] = jax.nn.softmax(_A ,axis=-1 ).cumsum(axis=-1 )
_lowerCAmelCase : Optional[int] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_lowerCAmelCase : Optional[Any] = jnp.roll(_A ,1 )
score_mask |= score_mask.at[:, 0].set(_A )
# min tokens to keep
_lowerCAmelCase : List[str] = score_mask.at[:, : self.min_tokens_to_keep].set(_A )
_lowerCAmelCase : List[str] = jnp.where(_A ,_A ,_A )
_lowerCAmelCase : Tuple = jax.lax.sort_key_val(_A ,_A )[-1]
return next_scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A = -float('Inf' ) ,_A = 1 ):
'''simple docstring'''
if not isinstance(_A ,_A ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
_lowerCAmelCase : Dict = max(_A ,_A )
_lowerCAmelCase : Dict = filter_value
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = scores.shape
_lowerCAmelCase : Union[str, Any] = jnp.full(batch_size * vocab_size ,self.filter_value )
_lowerCAmelCase : str = min(self.top_k ,scores.shape[-1] ) # Safety check
_lowerCAmelCase, _lowerCAmelCase : int = lax.top_k(_A ,_A )
_lowerCAmelCase : Union[str, Any] = jnp.broadcast_to((jnp.arange(_A ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
_lowerCAmelCase : Optional[int] = topk_scores.flatten()
_lowerCAmelCase : Optional[int] = topk_indices.flatten() + shift
_lowerCAmelCase : List[Any] = next_scores_flat.at[topk_indices_flat].set(_A )
_lowerCAmelCase : Union[str, Any] = next_scores_flat.reshape(_A ,_A )
return next_scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = bos_token_id
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = jnp.full(scores.shape ,-float('inf' ) )
_lowerCAmelCase : Any = 1 - jnp.bool_(cur_len - 1 )
_lowerCAmelCase : Optional[Any] = jnp.where(_A ,new_scores.at[:, self.bos_token_id].set(0 ) ,_A )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : Dict = eos_token_id
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = jnp.full(scores.shape ,-float('inf' ) )
_lowerCAmelCase : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_lowerCAmelCase : Any = jnp.where(_A ,new_scores.at[:, self.eos_token_id].set(0 ) ,_A )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
if not isinstance(_A ,_A ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(_A ,_A ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
_lowerCAmelCase : Union[str, Any] = min_length
_lowerCAmelCase : Dict = eos_token_id
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
_lowerCAmelCase : int = jnp.where(_A ,scores.at[:, self.eos_token_id].set(-float('inf' ) ) ,_A )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = list(_A )
_lowerCAmelCase : str = begin_index
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 1 - jnp.bool_(cur_len - self.begin_index )
_lowerCAmelCase : List[Any] = jnp.where(_A ,scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) ,_A )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = list(_A )
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(_A )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_lowerCAmelCase : int = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_lowerCAmelCase : Optional[int] = force_token_array.at[index].set(_A )
_lowerCAmelCase : str = jnp.intaa(_A )
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
def _force_token(_A ):
_lowerCAmelCase : List[str] = scores.shape[0]
_lowerCAmelCase : Tuple = self.force_token_array[generation_idx]
_lowerCAmelCase : int = jnp.ones_like(_A ,dtype=scores.dtype ) * -float('inf' )
_lowerCAmelCase : List[Any] = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
_lowerCAmelCase : Optional[Any] = lax.dynamic_update_slice(_A ,_A ,(0, current_token) )
return new_scores
_lowerCAmelCase : Optional[int] = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(_A ) ,lambda: scores ,) ,)
return scores
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = generate_config.eos_token_id
_lowerCAmelCase : Union[str, Any] = generate_config.no_timestamps_token_id
_lowerCAmelCase : List[str] = generate_config.no_timestamps_token_id + 1
_lowerCAmelCase : Optional[Any] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_A ,'max_initial_timestamp_index' ):
_lowerCAmelCase : Optional[Any] = generate_config.max_initial_timestamp_index
else:
_lowerCAmelCase : Optional[int] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_lowerCAmelCase : str = model_config.vocab_size
def __call__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_A ,_A ):
_lowerCAmelCase : str = jnp.where((cur_len - self.begin_index) >= 1 ,_A ,_A )
_lowerCAmelCase : Union[str, Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,_A ,)
_lowerCAmelCase : Any = jnp.where((cur_len - self.begin_index) < 2 ,_A ,_A )
_lowerCAmelCase : int = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,_A ,_A ,)
return jnp.where(
_A ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) ,scores_k.at[: self.eos_token_id].set(-float('inf' ) ) ,) ,_A ,)
_lowerCAmelCase : Dict = jax.vmap(_A )(_A ,_A )
_lowerCAmelCase : Tuple = jnp.where(cur_len == self.begin_index ,_A ,_A )
_lowerCAmelCase : List[str] = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,_A ,)
_lowerCAmelCase : Dict = self.timestamp_begin + self.max_initial_timestamp_index
_lowerCAmelCase : Dict = jnp.where(
_A ,scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) ,_A ,)
# if sum of probability over timestamps is above any other token, sample timestamp
_lowerCAmelCase : Dict = jax.nn.log_softmax(_A ,axis=-1 )
def handle_cumulative_probs(_A ,_A ):
_lowerCAmelCase : Tuple = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
_lowerCAmelCase : Tuple = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) ,_A ,)
_lowerCAmelCase : Any = jax.vmap(_A )(_A ,_A )
return scores
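# --- Illustrative warper arithmetic (not part of the library code) ---
# A minimal, self-contained sketch of what the temperature and top-k warpers
# above compute, written directly with jax.numpy; the values are arbitrary.
if __name__ == "__main__":
    scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
    scores = scores / 0.7  # temperature < 1 sharpens the distribution
    topk_scores, topk_indices = lax.top_k(scores, 2)
    filtered = jnp.full_like(scores, -float("inf"))
    filtered = filtered.at[0, topk_indices[0]].set(topk_scores[0])
    print(filtered)  # only the two best logits keep finite scores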
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.sp_model)

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        '''simple docstring'''
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
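

# --- Usage sketch (added by editor, not part of the original module). Assumes the
# standard pretrained checkpoint name and that sentencepiece is installed; the helper
# name is illustrative.
def _tokenizer_usage_sketch():
    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tok("Hello world")["input_ids"]
    # build_inputs_with_special_tokens above appends <sep> and <cls> at the end,
    # which pairs with the "left" padding side declared on the class.
    return tok.convert_ids_to_tokens(ids)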
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
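

# --- Usage sketch (added by editor, not part of the original module): the
# attribute_map above routes generic config attributes onto decoder fields, so common
# accessors keep working. The helper name is illustrative.
def _config_usage_sketch():
    cfg = TrOCRConfig(d_model=512, decoder_layers=6)
    assert cfg.hidden_size == 512      # resolved through attribute_map -> d_model
    assert cfg.num_hidden_layers == 6  # resolved through attribute_map -> decoder_layers
    return cfg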
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        '''simple docstring'''
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        '''simple docstring'''
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        '''simple docstring'''
        import hashlib

        testing = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(testing).hash, hashlib.sha256(testing).hexdigest())
def main():
    '''simple docstring'''
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
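

# --- Editor's sketch (not in the original file): a quick known-answer check against
# the classic SHA-256 test vector for "abc".
def _known_answer_sketch():
    assert SHA256(b"abc").hash == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"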
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__(input_num):
    '''simple docstring'''
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
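

# --- Worked example (added by editor, not in the original file): the proper divisors
# of 28 are 1, 2, 4, 7 and 14, which sum to 28, so 28 is a perfect number.
def _divisor_sum_example():
    assert lowerCamelCase__(28) == 28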
| 16 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    def __init__(self, key=None):
        '''simple docstring'''
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        '''simple docstring'''
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        '''simple docstring'''
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        '''simple docstring'''
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        '''simple docstring'''
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        '''simple docstring'''
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i):
        '''simple docstring'''
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        '''simple docstring'''
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        '''simple docstring'''
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        '''simple docstring'''
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        '''simple docstring'''
        return self.arr[0] if self.size else None

    def extract_top(self):
        '''simple docstring'''
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
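

# --- Usage sketch (added by editor, not part of the original file). Passing a
# negating key turns the key-based min-heap above into a max-heap over item values.
def _heap_usage_sketch():
    heap = Heap(key=lambda x: -x)          # negated key => behaves as a max-heap
    for item, value in [("a", 3), ("b", 1), ("c", 4)]:
        heap.insert_item(item, value)
    top = heap.get_top()                   # item "c" carries the best (smallest) score
    heap.update_item("b", 10)              # re-score an existing item in place
    return top, heap.extract_top()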
| 16 | 1 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"""Loading PyTorch weights from {pt_path}""")
        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.""")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    '''simple docstring'''
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    '''simple docstring'''
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['params']
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats']))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('.'))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    '''simple docstring'''
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""")
    # import correct flax class
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """)
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('.')[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('.')[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_mean',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_var',)

        if "batch_stats" in flax_state:
            flax_key = '.'.join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = '.'.join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split('.')
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + '_g'
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + '_v'
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = '.'.join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'If your task is similar to the task the model of the checkpoint was trained on, '
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
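

# --- Editor's sketch (not part of the original module): the central layout rule used
# in both conversion directions above. PyTorch's Linear stores its weight as
# (out, in); a Flax Dense kernel is (in, out), so a plain transpose converts them.
def _linear_layout_sketch():
    import numpy as np

    pt_weight = np.zeros((8, 4))   # torch.nn.Linear(4, 8).weight -> shape (out, in)
    flax_kernel = pt_weight.T      # Flax Dense kernel -> shape (in, out)
    return flax_kernel.shape       # (4, 8)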
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_attention_heads=32, attention_head_dim=64, num_layers=20, embedding_dim=768, num_embeddings=77, additional_embeddings=4, dropout=0.0, time_embed_act_fn="silu", norm_in_type=None, embedding_proj_norm_type=None, encoder_hid_proj_type="linear", added_emb_type="prd", time_embed_dim=None, embedding_proj_dim=None, clip_embed_dim=None):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn='gelu', attention_bias=True,)
                for d in range(num_layers)
            ] )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0 )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        '''simple docstring'''
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, 'set_processor'):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        '''simple docstring'''
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict=True):
        '''simple docstring'''
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds, dim=1,)
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0,)

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
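

# --- Editor's sketch (not in the original file): the causal mask registered in
# __init__ above, shown standalone. Row i keeps 0 for positions <= i and -10000 for
# future positions, which is added to attention logits to block lookahead.
def _causal_mask_sketch(n=5):
    import torch

    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)           # keep -10000 strictly above the diagonal, zero elsewhere
    return mask[None, ...]  # add the broadcast dimension, as the register_buffer does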
| 16 | 1 |
"""simple docstring"""
from math import isqrt
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(_lowerCamelCase ) + 1 ) )
def lowerCamelCase__ ( _lowerCamelCase = 10**6 ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Union[str, Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_lowerCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
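

# --- Editor's note (sketch, not in the original file): consecutive-cube differences
# are (n + 1) ** 3 - n ** 3 == 3 * n * n + 3 * n + 1, giving 7, 19, 37, 61, 91, ...;
# the first four are prime while 91 == 7 * 13 is not, matching what the loop counts.
def _cube_diff_example():
    diffs = [(n + 1) ** 3 - n ** 3 for n in range(1, 6)]
    assert diffs == [7, 19, 37, 61, 91]
    return diffs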
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        '''simple docstring'''
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.' )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                F"""Device with string identifier {self.device} not listed among the available """
                F"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                F"""device: {str(jax.devices()[0])}.""" )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        '''simple docstring'''
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
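

# --- Editor's sketch (not part of the original module): the integer-precision rule
# used in _tensorize above, demonstrated directly. With x64 disabled (JAX's default),
# integer arrays come out as int32; enabling jax_enable_x64 yields int64.
def _jax_int_precision_sketch():
    import jax
    import jax.numpy as jnp

    dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    return jnp.array([1, 2, 3], dtype=dtype).dtype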
| 16 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer, vae, scheduler, id2label=None):
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label):
        '''simple docstring'''
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(self, class_labels, guidance_scale=4.0, generator=None, num_inference_steps=50, output_type="pil", return_dict=True):
        '''simple docstring'''
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
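

# --- Editor's sketch (not in the original file): the classifier-free guidance update
# used in the denoising loop above, on dummy tensors. The batch stacks conditional and
# unconditional halves; eps is recombined as uncond + scale * (cond - uncond).
def _cfg_sketch():
    import torch

    noise_pred = torch.randn(4, 4, 8, 8)  # [2 * batch, latent_channels, h, w]
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    guidance_scale = 4.0
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)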
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(self, batch_size=1, audio_file=None, raw_audio=None, slice=0, start_step=0, steps=None, generator=None, mask_start_secs=0, mask_end_secs=0, step_generator=None, eta=0, noise=None, encoding=None, return_dict=True):
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device,)
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)['sample']
            else:
                model_output = self.unet(images, t)['sample']

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,)['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,)['prev_sample']

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB').convert('L') for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images, steps=50):
        '''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0, x1, alpha):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
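

# --- Editor's sketch (not in the original file): the spherical interpolation (slerp)
# implemented above, on plain 1-D tensors. theta is the angle between x0 and x1;
# alpha in [0, 1] walks along the great circle between them.
def _slerp_sketch(alpha=0.3):
    import torch
    from math import acos, sin

    x0, x1 = torch.randn(8), torch.randn(8)
    theta = acos(float(torch.dot(x0, x1) / torch.norm(x0) / torch.norm(x1)))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)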
| 16 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = CodeGenTokenizer
_UpperCAmelCase = CodeGenTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = {"add_prefix_space": True}
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_lowerCAmelCase : Dict = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase : Union[str, Any] = {'unk_token': '<unk>'}
_lowerCAmelCase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'lower newer'
_lowerCAmelCase : int = 'lower newer'
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowerCAmelCase : List[Any] = 'lower newer'
_lowerCAmelCase : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowerCAmelCase : Any = tokenizer.tokenize(_A ,add_prefix_space=_A )
self.assertListEqual(_A ,_A )
_lowerCAmelCase : Any = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=_A )
_lowerCAmelCase : Dict = 'lower newer'
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(_A ,add_prefix_space=_A )
_lowerCAmelCase : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A ,_A )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_A ,add_special_tokens=_A ,add_prefix_space=_A )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(_A ,add_special_tokens=_A )
self.assertListEqual(_A ,_A )
# Testing conversion to ids with special tokens
_lowerCAmelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=_A )
_lowerCAmelCase : Optional[int] = tokenizer.encode(_A ,add_prefix_space=_A )
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(_A )
self.assertListEqual(_A ,_A )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ) ,_A )
def __lowerCamelCase ( self ,*_A ,**_A ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ,_A=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_A ,**_A )
# Simple input
_lowerCAmelCase : Dict = 'This is a simple input'
_lowerCAmelCase : Dict = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase : Any = ('This is a simple input', 'This is a pair')
_lowerCAmelCase : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_A ,tokenizer_r.encode ,_A ,max_length=_A ,padding='max_length' )
# Simple input
self.assertRaises(_A ,tokenizer_r.encode_plus ,_A ,max_length=_A ,padding='max_length' )
# Simple input
self.assertRaises(
_A ,tokenizer_r.batch_encode_plus ,_A ,max_length=_A ,padding='max_length' ,)
# Pair input
self.assertRaises(_A ,tokenizer_r.encode ,_A ,max_length=_A ,padding='max_length' )
# Pair input
self.assertRaises(_A ,tokenizer_r.encode_plus ,_A ,max_length=_A ,padding='max_length' )
# Pair input
self.assertRaises(
_A ,tokenizer_r.batch_encode_plus ,_A ,max_length=_A ,padding='max_length' ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' )
# Simple input
_lowerCAmelCase : str = 'This is a simple input'
_lowerCAmelCase : List[Any] = ['This is a simple input looooooooong', 'This is a simple input']
_lowerCAmelCase : Optional[Any] = ('This is a simple input', 'This is a pair')
_lowerCAmelCase : List[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : List[str] = tokenizer(_A ,padding='max_length' ,max_length=30 ,return_tensors='np' )
_lowerCAmelCase : Optional[int] = tokenizer(_A ,padding=_A ,truncate=_A ,return_tensors='np' )
_lowerCAmelCase : Dict = tokenizer(*_A ,padding='max_length' ,max_length=60 ,return_tensors='np' )
_lowerCAmelCase : Optional[Any] = tokenizer(_A ,padding=_A ,truncate=_A ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = '$$$'
_lowerCAmelCase : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=_A ,add_bos_token=_A )
_lowerCAmelCase : List[Any] = 'This is a simple input'
_lowerCAmelCase : int = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase : Dict = tokenizer.bos_token_id
_lowerCAmelCase : Any = tokenizer(_A )
_lowerCAmelCase : List[Any] = tokenizer(_A )
self.assertEqual(out_s.input_ids[0] ,_A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Any = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,_A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_lowerCAmelCase : Union[str, Any] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_lowerCAmelCase : str = '\nif len_a > len_b: result = a\nelse: result = b'
_lowerCAmelCase : Optional[Any] = tokenizer.encode(_A )
_lowerCAmelCase : str = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_lowerCAmelCase : List[Any] = tokenizer.decode(_A ,truncate_before_pattern=_A )
self.assertEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
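# Usage sketch of the behaviour exercised by the slow test above. The
# `truncate_before_pattern` argument is specific to the CodeGen tokenizer's
# `decode`: the decoded text is cut at the first match of any of the given
# regexes. Requires network access to fetch the checkpoint named in the test.
#
#   from transformers import CodeGenTokenizer
#   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
#   tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])
#   # -> "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"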
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
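# A tiny sanity check for `get_pairs`: every adjacent symbol pair of the word
# tuple appears exactly once in the returned set (order is irrelevant).
assert get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}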
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>",
                 do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair merges to a single token, returning its BPE segmentation."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Split `text` into BPE sub-tokens; requires a merges file."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) into a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
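# Decoding sketch: BPE continuation markers ("@@ ") are removed when sub-tokens
# are joined back into a string, mirroring `convert_tokens_to_string` above.
example_tokens = ["spee@@", "ch", "to@@", "text"]
assert "".join(" ".join(example_tokens).split(BPE_TOKEN_VOCAB)) == "speech totext"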
| 16 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowerCAmelCase = get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCAmelCase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCAmelCase : str = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCAmelCase : Optional[int] = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
logger.info(f"""Saving model to {ckpt_dir}""" )
_lowerCAmelCase : List[Any] = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=_lowerCamelCase , storage_writer=dist_cp.FileSystemWriter(_lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_lowerCamelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
_lowerCAmelCase : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCAmelCase : Any = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCAmelCase : Optional[Any] = (
os.path.join(_lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
_lowerCAmelCase : Any = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=_lowerCamelCase , storage_reader=dist_cp.FileSystemReader(_lowerCamelCase ) , planner=DefaultLoadPlanner() , )
_lowerCAmelCase : List[str] = state_dict['model']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCAmelCase : int = FSDP.optim_state_dict(_lowerCamelCase , _lowerCamelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowerCAmelCase : Any = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCAmelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(_lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCAmelCase : Optional[int] = None
            # The check below should work, but currently it does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowerCAmelCase : int = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
_lowerCAmelCase : str = (
os.path.join(_lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
_lowerCAmelCase : List[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(_lowerCamelCase ) , )
_lowerCAmelCase : int = optim_state['optimizer']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
_lowerCAmelCase : str = FSDP.optim_state_dict_to_load(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
optimizer.load_state_dict(_lowerCamelCase )
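# High-level usage sketch. In upstream accelerate these four helpers are named
# save_fsdp_model / load_fsdp_model / save_fsdp_optimizer / load_fsdp_optimizer
# (the defs above were name-mangled in this dump); they are normally invoked
# indirectly via the Accelerator rather than called by hand:
#
#   accelerator.save_state("ckpt")   # dispatches to the save helpers per StateDictType
#   accelerator.load_state("ckpt")   # dispatches to the load helpers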
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0,
                 d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64,
                 d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None,
                encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm (optionally FiLM-conditioned)
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated feed-forward: GELU gate times a linear projection
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm that only scales and does not shift (RMSNorm),
        # computed in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input):
        # tanh approximation of GELU, as used in Google BERT / GPT
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM: feature-wise linear modulation applied from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
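# Minimal standalone check of the FiLM mechanism implemented by TaFiLMLayer:
# a conditioning vector is projected to per-channel (scale, shift) and applied
# as x * (1 + scale) + shift. Shapes below are illustrative; torch/nn are
# already imported at the top of this module.
def _film_demo():
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 3, 4)      # (batch, seq, channels)
    cond = torch.randn(2, 1, 8)   # conditioning embedding, broadcast over seq
    out = film(x, cond)
    assert out.shape == x.shape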
| 16 | 1 |
"""simple docstring"""
def prime_sieve_eratosthenes(num):
    """Return the list of primes up to and including `num` (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
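# Worked example (with the identifier fixes above):
#   prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# Runtime is O(n log log n); memory is O(n) for the boolean sieve.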
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
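# Stand-alone inference sketch mirroring the integration test above. The
# checkpoint name is an assumption: the tests index into
# TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, whose first entry is usually
# "microsoft/resnet-50".
#
#   from transformers import AutoImageProcessor, TFResNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   pred = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])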
| 16 | 1 |
"""simple docstring"""
import random
def random_graph(vertices_number, probability, directed=False):
    """Generate a random graph as an adjacency-list dict, edge probability `probability`."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than `probability`
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number):
    """Adjacency-list dict of the complete graph on `vertices_number` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
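# Usage sketch (module-level, after the doctest guard above): seed the RNG for
# reproducibility, then sample an undirected G(n, p) graph.
#   random.seed(1)
#   g = random_graph(4, 0.5)  # adjacency-list dict such as {0: [1, 3], 1: [0], ...}
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}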
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through the points (1, y_1), ..., (k, y_k)."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating polynomial u(n) from the Project Euler 101 statement."""
    return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials of increasing degree."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
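# Independent mini-check of the FIT idea, using the cubic example from the
# Project Euler 101 statement: the degree-1 fit through u(1), u(2) of
# u(n) = n**3 first disagrees at n = 3, where it predicts 15 (u(3) = 27).
def _line_through(p1, p2):
    (xa, ya), (xb, yb) = p1, p2
    slope = (yb - ya) / (xb - xa)
    return lambda x: slope * (x - xa) + ya


_fit = _line_through((1, 1), (2, 8))
assert _fit(3) == 15 and 3**3 == 27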
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to `num`, sieving only up to sqrt(num)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
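# Behaviour check for the two helpers above: a word counts as Chinese only if
# every character falls in a CJK block.
assert is_chinese("中国") == 1
assert is_chinese("abc") == 0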
def get_chinese_word(tokens: List[str]):
    """Collect the multi-character, fully-Chinese words from a token list."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix non-initial sub-tokens of known Chinese words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    """For each line, return the positions of Chinese sub-tokens that continue a whole word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate a 10-digit Indian mobile number, with optional +91/91/0 prefix."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
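# Quick behavioural checks: 10-digit numbers must start with 7, 8 or 9;
# "+91", "91" and a leading "0" are accepted as prefixes.
assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")
assert not indian_phone_validator("1234567890")  # leading 1 is rejected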
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
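# Minimal end-user sketch of the pipeline exercised by the tests above
# (network access and a GPU assumed; the checkpoint name comes from the
# slow/nightly tests):
#
#   from diffusers import LDMTextToImagePipeline
#   pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
#   image = pipe("A painting of a squirrel eating a burger",
#                num_inference_steps=50, guidance_scale=6.0).images[0]
#   image.save("squirrel.png")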
| 16 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = CanineTokenizer
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : str = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return CanineTokenizer.from_pretrained('google/canine-s' )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ,**_A )
_lowerCAmelCase : List[Any] = 1024
return tokenizer
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.canine_tokenizer
_lowerCAmelCase : Tuple = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
_lowerCAmelCase : int = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
_lowerCAmelCase : str = tokenizer(_A ,padding=_A ,return_tensors='pt' )
self.assertIsInstance(_A ,_A )
_lowerCAmelCase : Tuple = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A ,_A )
self.assertEqual((2, 39) ,batch.input_ids.shape )
self.assertEqual((2, 39) ,batch.attention_mask.shape )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.canine_tokenizer
_lowerCAmelCase : Optional[int] = ['Once there was a man.', 'He wrote a test in HuggingFace Transformers.']
_lowerCAmelCase : Dict = tokenizer(_A ,padding=_A ,return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' ,_A )
self.assertIn('attention_mask' ,_A )
self.assertIn('token_type_ids' ,_A )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.canine_tokenizer
_lowerCAmelCase : Dict = [
'What\'s the weather?',
'It\'s about 25 degrees.',
]
_lowerCAmelCase : Optional[int] = tokenizer(
text_target=_A ,max_length=32 ,padding='max_length' ,truncation=_A ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
_lowerCAmelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : int = ' He is very happy, UNwant\u00E9d,running'
_lowerCAmelCase : Tuple = tokenizer.encode(_A ,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
_lowerCAmelCase : str = tokenizer.__class__.from_pretrained(_A )
_lowerCAmelCase : int = after_tokenizer.encode(_A ,add_special_tokens=_A )
self.assertListEqual(_A ,_A )
shutil.rmtree(_A )
_lowerCAmelCase : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : str = ' He is very happy, UNwant\u00E9d,running'
_lowerCAmelCase : List[str] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_lowerCAmelCase : Optional[int] = chr(0xE007 )
additional_special_tokens.append(_A )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCAmelCase : Any = tokenizer.encode(_A ,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
_lowerCAmelCase : int = tokenizer.__class__.from_pretrained(_A )
_lowerCAmelCase : str = after_tokenizer.encode(_A ,add_special_tokens=_A )
self.assertListEqual(_A ,_A )
self.assertIn(_A ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
_lowerCAmelCase : str = tokenizer.__class__.from_pretrained(_A ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase, _lowerCAmelCase : Dict = self.get_clean_sequence(_A )
# a special token for Canine can be defined as follows:
_lowerCAmelCase : List[Any] = 0xE005
_lowerCAmelCase : Dict = chr(_A )
tokenizer.add_special_tokens({'cls_token': special_token} )
_lowerCAmelCase : Any = tokenizer.encode(_A ,add_special_tokens=_A )
self.assertEqual(len(_A ) ,1 )
_lowerCAmelCase : Tuple = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_A )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_A ,add_special_tokens=_A )
_lowerCAmelCase : List[Any] = tokenizer.encode(_A ,add_special_tokens=_A )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(_A ,add_special_tokens=_A )
self.assertEqual(_A ,input_encoded + special_token_id )
_lowerCAmelCase : List[Any] = tokenizer.decode(_A ,skip_special_tokens=_A )
self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : Union[str, Any] = chr(0xE005 )
_lowerCAmelCase : Tuple = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_A )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
_lowerCAmelCase : str = tokenizer.tokenize(_A )
_lowerCAmelCase : Tuple = tokenizer.tokenize(_A )
self.assertEqual(len(_A ) ,1 )
self.assertEqual(len(_A ) ,1 )
self.assertEqual(token_a[0] ,_A )
self.assertEqual(token_a[0] ,_A )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
_lowerCAmelCase : Optional[int] = 0xE006
_lowerCAmelCase : List[Any] = chr(_A )
_lowerCAmelCase : Union[str, Any] = AddedToken(_A ,lstrip=_A )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_A )
tokenizer.from_pretrained(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
_lowerCAmelCase : List[Any] = json.load(_A )
with open(os.path.join(_A ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
_lowerCAmelCase : Optional[Any] = json.load(_A )
# a special token for Canine can be defined as follows:
_lowerCAmelCase : str = 0xE006
_lowerCAmelCase : List[str] = chr(_A )
_lowerCAmelCase : Dict = [new_token_a]
_lowerCAmelCase : Optional[Any] = [new_token_a]
with open(os.path.join(_A ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(_A ,_A )
with open(os.path.join(_A ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(_A ,_A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase : Union[str, Any] = tokenizer_class.from_pretrained(_A ,extra_ids=0 )
self.assertIn(_A ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
_lowerCAmelCase : List[Any] = 0xE007
_lowerCAmelCase : List[str] = chr(_A )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase : Optional[Any] = [AddedToken(_A ,lstrip=_A )]
_lowerCAmelCase : str = tokenizer_class.from_pretrained(
_A ,additional_special_tokens=_A ,extra_ids=0 )
self.assertIn(_A ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : str = 'hello world'
if self.space_between_special_tokens:
_lowerCAmelCase : Optional[Any] = '[CLS] hello world [SEP]'
else:
_lowerCAmelCase : Optional[Any] = input
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_A ,add_special_tokens=_A )
_lowerCAmelCase : Tuple = tokenizer.decode(_A ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_A ,[output, output.lower()] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : Any = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowerCAmelCase : Union[str, Any] = 'a'
_lowerCAmelCase : List[str] = ord(_A )
for attr in attributes_list:
setattr(_A ,attr + '_id' ,_A )
self.assertEqual(getattr(_A ,_A ) ,_A )
self.assertEqual(getattr(_A ,attr + '_id' ) ,_A )
setattr(_A ,attr + '_id' ,_A )
self.assertEqual(getattr(_A ,_A ) ,_A )
self.assertEqual(getattr(_A ,attr + '_id' ) ,_A )
setattr(_A ,'additional_special_tokens_ids' ,[] )
self.assertListEqual(getattr(_A ,'additional_special_tokens' ) ,[] )
self.assertListEqual(getattr(_A ,'additional_special_tokens_ids' ) ,[] )
_lowerCAmelCase : List[Any] = 0xE006
_lowerCAmelCase : Any = chr(_A )
setattr(_A ,'additional_special_tokens_ids' ,[additional_special_token_id] )
self.assertListEqual(getattr(_A ,'additional_special_tokens' ) ,[additional_special_token] )
self.assertListEqual(getattr(_A ,'additional_special_tokens_ids' ) ,[additional_special_token_id] )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
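# Usage sketch (added for illustration): CANINE works directly on Unicode
# code points, so input_ids are character ordinals framed by the special
# code points 0xE000 ([CLS]) and 0xE001 ([SEP]), matching the expected ids
# in the batch-encoding test above.
def _example_canine_tokenizer():
    from transformers import CanineTokenizer

    tokenizer = CanineTokenizer.from_pretrained('google/canine-s')
    encoding = tokenizer('Life is like a box of chocolates.', return_tensors='pt')
    return encoding.input_ids  # [[57344, ord('L'), ord('i'), ..., 57345]]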
| 16 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 (base85) bytes."""
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 (base85) bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode('utf-8')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
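    # Round-trip example (added): exercises the helpers above; Ascii85 encoding
    # of a UTF-8 string decodes back to the original exactly.
    sample = 'some text to encode'
    assert base85_decode(base85_encode(sample)) == sample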
| 16 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = Transformer2DModel(
sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=_A ,activation_fn='gelu-approximate' ,num_embeds_ada_norm=1000 ,norm_type='ada_norm_zero' ,norm_elementwise_affine=_A ,)
_lowerCAmelCase : Union[str, Any] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Tuple = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : Dict = torch.manual_seed(_A )
else:
_lowerCAmelCase : int = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : Tuple = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu'
_lowerCAmelCase : Optional[int] = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(_A )
_lowerCAmelCase : Dict = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
_lowerCAmelCase : List[str] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A ,1E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=_A ,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_lowerCAmelCase : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
_lowerCAmelCase : str = pipe.get_label_ids(_A )
_lowerCAmelCase : Dict = pipe(_A ,generator=_A ,num_inference_steps=40 ,output_type='np' ).images
for word, image in zip(_A ,_A ):
_lowerCAmelCase : Any = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_lowerCAmelCase : str = ['vase', 'umbrella']
_lowerCAmelCase : Any = pipe.get_label_ids(_A )
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(_A ,generator=_A ,num_inference_steps=25 ,output_type='np' ).images
for word, image in zip(_A ,_A ):
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
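# Usage sketch (added for illustration): class-conditional ImageNet sampling
# with DiT, mirroring the slow test above. Requires a CUDA GPU; the checkpoint
# is downloaded on first use.
def _example_dit():
    import torch
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
    class_ids = pipe.get_label_ids(['white shark'])
    generator = torch.manual_seed(0)
    return pipe(class_labels=class_ids, generator=generator, num_inference_steps=25, output_type='np').images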
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Keep the backend normalizer in sync with the arguments passed at init time.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS]/[SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0 for the first segment (incl. [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
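# Usage sketch (added for illustration): encoding a sentence pair; the token
# type ids follow create_token_type_ids_from_sequences above (0s for the first
# segment including [CLS]/[SEP], 1s for the second).
def _example_bert_tokenizer_fast():
    tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
    encoding = tokenizer('first sentence', 'second sentence')
    return encoding['input_ids'], encoding['token_type_ids']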
| 16 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self, **kwargs):
        """Sanitize deprecated `no_*` kwargs and pop the TF-specific ones before the dataclass init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ['tf'])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ['tf'])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], 'GPU')  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ['tf'])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ['tf'])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['tf'])
        return tf.config.list_physical_devices('GPU')

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['tf'])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
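# Usage sketch (added for illustration; field names follow the dataclass above
# and its BenchmarkArguments base): device selection resolves TPU > single GPU
# > CPU via the cached properties.
def _example_tf_benchmark_args():
    from transformers import TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(models=['bert-base-uncased'], batch_sizes=[1], sequence_lengths=[8])
    return args.strategy  # a tf.distribute strategy chosen by _setup_strategy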
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
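# Launch note (added): the __main__ block above exercises pad_across_processes
# and only makes sense under a distributed launcher, matching the commands the
# test methods build, e.g. `torchrun --nproc_per_node=2 <this file>`.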
| 16 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (a positive integer whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    # One read pointer per prime factor; each points at the smallest ugly
    # number that has not yet been multiplied by that factor.
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
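    # Worked check (added): the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
    # so the 10th ugly number is 12; only the pointer(s) whose candidate matched
    # advance, keeping the three-way merge O(n).
    assert ugly_numbers(10) == 12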
| 16 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    """Print every permutation of ``sequence`` via backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    """Recursively extend ``current_sequence`` one element at a time, undoing each choice on return."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
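# Note (added): the backtracking above visits the full permutation tree, so it
# prints all n! orderings; index_used marks positions already taken on the
# current root-to-leaf path and is unwound after each recursive call.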
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
_lowerCAmelCase = """2020.9.26"""
_lowerCAmelCase = """xcodz-dot, cclaus, dhruvmanila"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not all(isinstance(_lowerCamelCase , (float, int) ) for val in locals().values() ):
_lowerCAmelCase : Any = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(_lowerCamelCase )
_lowerCAmelCase : Any = ((x * distance) / (z + distance)) * scale
_lowerCAmelCase : Dict = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError('Axis must be a str' )
_lowerCAmelCase : Union[str, Any] = locals()
del input_variables["axis"]
if not all(isinstance(_lowerCamelCase , (float, int) ) for val in input_variables.values() ):
_lowerCAmelCase : List[str] = (
'Input values except axis must either be float or int: '
f"""{list(input_variables.values() )}"""
)
raise TypeError(_lowerCamelCase )
_lowerCAmelCase : Tuple = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
_lowerCAmelCase : Tuple = x * math.cos(_lowerCamelCase ) - y * math.sin(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = y * math.cos(_lowerCamelCase ) + x * math.sin(_lowerCamelCase )
_lowerCAmelCase : List[Any] = z
elif axis == "x":
_lowerCAmelCase : str = y * math.cos(_lowerCamelCase ) - z * math.sin(_lowerCamelCase )
_lowerCAmelCase : List[Any] = z * math.cos(_lowerCamelCase ) + y * math.sin(_lowerCamelCase )
_lowerCAmelCase : Dict = x
elif axis == "y":
_lowerCAmelCase : Optional[Any] = x * math.cos(_lowerCamelCase ) - z * math.sin(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = z * math.cos(_lowerCamelCase ) + x * math.sin(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
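# Note (added): rotate() keeps the original library's unconventional angle
# scaling, (angle % 360) / 450 * 180 / pi, so the `angle` argument is neither
# plain degrees nor radians; it is preserved unchanged for compatibility.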
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """Check whether this process should emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the underlying logger; by default only the main process logs."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """Return a multi-process-aware logger wrapped in `MultiProcessAdapter`."""
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
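# Usage sketch (added for illustration): the accelerate state must exist
# before the first log call; by default records are emitted on the main
# process only.
def _example_multiprocess_logging():
    from accelerate import Accelerator

    accelerator = Accelerator()  # initializes PartialState
    logger = get_logger(__name__, log_level='INFO')
    logger.info('visible on the main process only')
    logger.info('visible on every process', main_process_only=False)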
| 16 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = val
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCAmelCase : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
_lowerCAmelCase : Any = value
else:
_lowerCAmelCase : Any = value
return new_state_dict
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCAmelCase : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : List[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:256, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[:256]
_lowerCAmelCase : int = in_proj_weight[256:512, :]
_lowerCAmelCase : str = in_proj_bias[256:512]
_lowerCAmelCase : str = in_proj_weight[-256:, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:256, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[:256]
_lowerCAmelCase : List[str] = in_proj_weight[256:512, :]
_lowerCAmelCase : int = in_proj_bias[256:512]
_lowerCAmelCase : Any = in_proj_weight[-256:, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCAmelCase : List[Any] = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCAmelCase : Union[str, Any] = in_proj_weight_cross_attn[:256, :]
_lowerCAmelCase : str = in_proj_bias_cross_attn[:256]
_lowerCAmelCase : Tuple = in_proj_weight_cross_attn[256:512, :]
_lowerCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowerCAmelCase : List[str] = in_proj_weight_cross_attn[-256:, :]
_lowerCAmelCase : List[Any] = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : str = image.size
_lowerCAmelCase : Optional[Any] = max(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = 800 if 'detection' in checkpoint_url else 1000
_lowerCAmelCase : int = target_max_size / current_max_size
_lowerCAmelCase : List[Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = F.to_tensor(_lowerCamelCase )
_lowerCAmelCase : Dict = F.normalize(_lowerCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logger.info('Converting model...' )
# load original state dict
_lowerCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = rename_backbone_keys(_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCAmelCase : Optional[Any] = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowerCAmelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
# create HuggingFace model and load state dict
_lowerCAmelCase : Union[str, Any] = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_lowerCAmelCase : int = 15
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Union[str, Any] = {0: 'table', 1: 'table rotated'}
_lowerCAmelCase : Optional[Any] = idalabel
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
else:
_lowerCAmelCase : Any = 125
_lowerCAmelCase : Any = 6
_lowerCAmelCase : int = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
_lowerCAmelCase : Any = idalabel
_lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : int = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
_lowerCAmelCase : List[Any] = TableTransformerForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# verify our conversion
_lowerCAmelCase : Dict = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
_lowerCAmelCase : Optional[Any] = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=_lowerCamelCase )
_lowerCAmelCase : List[str] = Image.open(_lowerCamelCase ).convert('RGB' )
_lowerCAmelCase : Tuple = normalize(resize(_lowerCamelCase , _lowerCamelCase ) ).unsqueeze(0 )
_lowerCAmelCase : str = model(_lowerCamelCase )
if "detection" in checkpoint_url:
_lowerCAmelCase : List[Any] = (1, 15, 3)
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_lowerCAmelCase : Optional[int] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_lowerCAmelCase : Tuple = (1, 125, 7)
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_lowerCAmelCase : List[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
_lowerCAmelCase : str = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(_lowerCamelCase )
image_processor.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
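# Example invocation (added; the script file name is illustrative):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection
# Verification compares logits and boxes on the bundled example image before saving.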
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
       - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence
         lengths greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
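# --- Hedged sketch (added; not part of the original file) ---
# Standalone version of the span-selection idea implemented above: score every
# (start, end) window, sort by score, and keep the top non-overlapping spans.
def _sketch_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # skip spans that nest inside, or fully contain, an already chosen span
        if any(start <= ps <= pe <= end or ps <= start <= end <= pe for ps, pe in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

# _sketch_best_spans([0.9, 0.1, 0.2], [0.1, 0.8, 0.3]) -> [(0, 1), (2, 2)]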
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """▁"""
_lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
_lowerCAmelCase = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
_lowerCAmelCase = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = ["input_ids", "attention_mask"]
_UpperCAmelCase = []
_UpperCAmelCase = []
def __init__( self ,_A ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A=None ,_A=None ,_A=None ,_A = None ,_A=None ,_A=False ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : int = legacy_behaviour
super().__init__(
bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,tokenizer_file=_A ,src_lang=_A ,tgt_lang=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,legacy_behaviour=_A ,**_A ,)
_lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_lowerCAmelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens (a standalone sketch of this mapping follows the class)
_lowerCAmelCase : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : Any = len(self.sp_model )
_lowerCAmelCase : Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : Tuple = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.__dict__.copy()
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
_lowerCAmelCase : Any = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Any = src_lang
_lowerCAmelCase : Union[str, Any] = self(_A ,add_special_tokens=_A ,return_tensors=_A ,**_A )
_lowerCAmelCase : str = self.convert_tokens_to_ids(_A )
_lowerCAmelCase : Union[str, Any] = tgt_lang_id
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        return self.sp_model.encode(_A ,out_type=str )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : int = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = ''.join(_A ).replace('▁' ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Any = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __lowerCamelCase ( self ,_A ,_A = "eng_Latn" ,_A = None ,_A = "fra_Latn" ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = src_lang
_lowerCAmelCase : int = tgt_lang
return super().prepare_seqaseq_batch(_A ,_A ,**_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : Any = [self.cur_lang_code]
_lowerCAmelCase : Optional[Any] = [self.eos_token_id]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : List[str] = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
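# --- Hedged sketch (added; not part of the original file) ---
# Standalone illustration of the fairseq/spm id alignment described in __init__:
# fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, while spm puts <unk>/<s>/</s>
# at ids 0-2, so every real spm piece is shifted up by a fixed offset of 1.
_sketch_fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_sketch_fairseq_offset = 1

def _sketch_spm_to_fairseq(spm_id, unk_token_id=3):
    # spm id 0 is <unk>; any other piece id shifts up by the offset
    return spm_id + _sketch_fairseq_offset if spm_id else unk_token_id

# _sketch_spm_to_fairseq(0) -> 3 (<unk>); _sketch_spm_to_fairseq(3) -> 4 ('an')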
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components['unet'].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
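# --- Hedged sketch (added; not part of the original tests) ---
# The device-aware seeding pattern used by get_dummy_inputs above, in isolation:
# mps does not support device-bound generators, so the tests fall back to the
# globally seeded default CPU generator on that backend.
def _sketch_seeded_generator(device, seed=0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)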
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = SwinConfig()
_lowerCAmelCase : Union[str, Any] = swin_name.split('_' )
_lowerCAmelCase : Dict = name_split[1]
_lowerCAmelCase : Optional[Any] = int(name_split[4] )
_lowerCAmelCase : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
_lowerCAmelCase : str = 96
_lowerCAmelCase : List[Any] = (2, 2, 6, 2)
_lowerCAmelCase : str = (3, 6, 12, 24)
elif model_size == "small":
_lowerCAmelCase : Optional[Any] = 96
_lowerCAmelCase : Dict = (2, 2, 18, 2)
_lowerCAmelCase : Tuple = (3, 6, 12, 24)
elif model_size == "base":
_lowerCAmelCase : Union[str, Any] = 128
_lowerCAmelCase : List[str] = (2, 2, 18, 2)
_lowerCAmelCase : int = (4, 8, 16, 32)
else:
_lowerCAmelCase : Any = 192
_lowerCAmelCase : str = (2, 2, 18, 2)
_lowerCAmelCase : Any = (6, 12, 24, 48)
if "in22k" in swin_name:
_lowerCAmelCase : Dict = 21841
else:
_lowerCAmelCase : Optional[int] = 1000
_lowerCAmelCase : Dict = 'huggingface/label-files'
_lowerCAmelCase : Any = 'imagenet-1k-id2label.json'
_lowerCAmelCase : int = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Dict = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : Any = img_size
_lowerCAmelCase : Dict = num_classes
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : Optional[Any] = num_heads
_lowerCAmelCase : Tuple = window_size
return config
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if "patch_embed.proj" in name:
_lowerCAmelCase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowerCAmelCase : Optional[Any] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowerCAmelCase : List[Any] = 'encoder.' + name
if "attn.proj" in name:
_lowerCAmelCase : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_lowerCAmelCase : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowerCAmelCase : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCAmelCase : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowerCAmelCase : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCAmelCase : Tuple = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
_lowerCAmelCase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowerCAmelCase : Union[str, Any] = 'layernorm.bias'
if "head" in name:
_lowerCAmelCase : int = name.replace('head' , 'classifier' )
else:
_lowerCAmelCase : str = 'swin.' + name
return name
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : Any = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
_lowerCAmelCase : str = key.split('.' )
_lowerCAmelCase : List[str] = int(key_split[1] )
_lowerCAmelCase : Union[str, Any] = int(key_split[3] )
_lowerCAmelCase : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCAmelCase : str = val[:dim, :]
_lowerCAmelCase : int = val[
dim : dim * 2, :
]
_lowerCAmelCase : str = val[-dim:, :]
else:
_lowerCAmelCase : Any = val[
:dim
]
_lowerCAmelCase : Union[str, Any] = val[
dim : dim * 2
]
_lowerCAmelCase : str = val[
-dim:
]
else:
_lowerCAmelCase : List[Any] = val
return orig_state_dict
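# --- Hedged sketch (added; not part of the original script) ---
# Shape view of the qkv split performed above: timm stores query/key/value as a
# single fused (3*dim, dim) weight, which is sliced into three (dim, dim) blocks.
def _sketch_split_qkv(qkv_weight, dim):
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value

# _sketch_split_qkv(torch.randn(6, 2), dim=2) -> three (2, 2) tensors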
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
_lowerCAmelCase : Optional[int] = get_swin_config(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = SwinForImageClassification(_lowerCamelCase )
model.eval()
_lowerCAmelCase : List[str] = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
_lowerCAmelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
_lowerCAmelCase : Optional[int] = image_processor(images=_lowerCamelCase , return_tensors='pt' )
_lowerCAmelCase : List[Any] = timm_model(inputs['pixel_values'] )
_lowerCAmelCase : int = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCAmelCase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
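# Hypothetical invocation (script name and output path are illustrative):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224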
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
            # copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
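# --- Hedged sketch (added; not part of the original test file) ---
# The save/reload pattern exercised above, reduced to a plain-json analogue;
# the file name 'scheduler_config.json' is an assumption for illustration.
import json
import os

def _sketch_config_roundtrip(config):
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, 'scheduler_config.json')
        with open(path, 'w') as f:
            json.dump(config, f)
        with open(path) as f:
            reloaded = json.load(f)
    return reloaded == config

# _sketch_config_roundtrip({'solver_order': 2, 'solver_type': 'bh2'}) -> True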
| 16 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = """MobileNetV1Config"""
# Base docstring
_lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
_lowerCAmelCase = [1, 1_0_2_4, 7, 7]
# Image classification docstring
_lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
_lowerCAmelCase = """tabby, tabby cat"""
_lowerCAmelCase = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Tuple = model.mobilenet_va
else:
_lowerCAmelCase : Dict = model
_lowerCAmelCase : Optional[int] = 'MobilenetV1/Conv2d_0/'
_lowerCAmelCase : Optional[int] = backbone.conv_stem.convolution.weight
_lowerCAmelCase : Any = backbone.conv_stem.normalization.bias
_lowerCAmelCase : List[Any] = backbone.conv_stem.normalization.weight
_lowerCAmelCase : int = backbone.conv_stem.normalization.running_mean
_lowerCAmelCase : Union[str, Any] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_lowerCAmelCase : int = i + 1
_lowerCAmelCase : Optional[Any] = i * 2
_lowerCAmelCase : Optional[Any] = backbone.layer[pt_index]
_lowerCAmelCase : int = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
_lowerCAmelCase : str = pointer.convolution.weight
_lowerCAmelCase : Optional[Any] = pointer.normalization.bias
_lowerCAmelCase : Union[str, Any] = pointer.normalization.weight
_lowerCAmelCase : int = pointer.normalization.running_mean
_lowerCAmelCase : int = pointer.normalization.running_var
_lowerCAmelCase : Optional[int] = backbone.layer[pt_index + 1]
_lowerCAmelCase : str = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
_lowerCAmelCase : List[Any] = pointer.convolution.weight
_lowerCAmelCase : List[Any] = pointer.normalization.bias
_lowerCAmelCase : str = pointer.normalization.weight
_lowerCAmelCase : Optional[Any] = pointer.normalization.running_mean
_lowerCAmelCase : Optional[int] = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
_lowerCAmelCase : Tuple = model.classifier.weight
_lowerCAmelCase : Dict = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
            'Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
_lowerCAmelCase : int = tf.train.list_variables(_lowerCamelCase )
_lowerCAmelCase : Dict = {}
for name, shape in init_vars:
logger.info(f"""Loading TF weight {name} with shape {shape}""" )
_lowerCAmelCase : Any = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = array
# Build TF to PyTorch weights loading map
_lowerCAmelCase : str = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"""Importing {name}""" )
if name not in tf_weights:
logger.info(f"""{name} not in tf pre-trained weights, skipping""" )
continue
_lowerCAmelCase : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
_lowerCAmelCase : str = np.transpose(_lowerCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
_lowerCAmelCase : Dict = array.squeeze().transpose()
else:
_lowerCAmelCase : List[str] = np.transpose(_lowerCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" )
_lowerCAmelCase : Any = torch.from_numpy(_lowerCamelCase )
tf_weights.pop(_lowerCamelCase , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp' , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp_1' , _lowerCamelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , _lowerCamelCase )
logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
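# --- Hedged sketch (added; not part of the original file) ---
# Layout view of the transposes above: a regular TF conv kernel is stored as
# (H, W, in, out) while PyTorch expects (out, in, H, W), hence permutation
# (3, 2, 0, 1); depthwise kernels use (2, 3, 0, 1) instead. Shapes are made up.
import numpy as np

_sketch_tf_conv = np.zeros((3, 3, 16, 32))                      # hypothetical TF kernel
_sketch_pt_conv = np.transpose(_sketch_tf_conv, (3, 2, 0, 1))   # shape (32, 16, 3, 3)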
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = features.shape[-2:]
_lowerCAmelCase, _lowerCAmelCase : List[Any] = conv_layer.stride
_lowerCAmelCase, _lowerCAmelCase : List[Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
_lowerCAmelCase : Dict = max(kernel_height - stride_height , 0 )
else:
_lowerCAmelCase : Dict = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_lowerCAmelCase : List[str] = max(kernel_width - stride_width , 0 )
else:
_lowerCAmelCase : Optional[Any] = max(kernel_width - (in_width % stride_width) , 0 )
_lowerCAmelCase : Union[str, Any] = pad_along_width // 2
_lowerCAmelCase : str = pad_along_width - pad_left
_lowerCAmelCase : List[str] = pad_along_height // 2
_lowerCAmelCase : Optional[int] = pad_along_height - pad_top
_lowerCAmelCase : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , 'constant' , 0.0 )
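# --- Hedged worked example (added; not part of the original file) ---
# The TF "SAME" padding rule computed above, reduced to one dimension:
def _sketch_same_padding_1d(in_size, stride, kernel):
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (in_size % stride), 0)
    return pad // 2, pad - pad // 2  # (pad_before, pad_after)

# _sketch_same_padding_1d(7, 2, 3) -> (1, 1); _sketch_same_padding_1d(8, 2, 3) -> (0, 1)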
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A = 1 ,_A = 1 ,_A = False ,_A = True ,_A = True ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
_lowerCAmelCase : Union[str, Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_lowerCAmelCase : Optional[Any] = nn.Convad(
in_channels=_A ,out_channels=_A ,kernel_size=_A ,stride=_A ,padding=_A ,groups=_A ,bias=_A ,padding_mode='zeros' ,)
if use_normalization:
_lowerCAmelCase : Union[str, Any] = nn.BatchNormad(
num_features=_A ,eps=config.layer_norm_eps ,momentum=0.9_9_9_7 ,affine=_A ,track_running_stats=_A ,)
else:
_lowerCAmelCase : Dict = None
if use_activation:
if isinstance(_A ,_A ):
_lowerCAmelCase : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act ,_A ):
_lowerCAmelCase : List[Any] = ACTaFN[config.hidden_act]
else:
_lowerCAmelCase : Dict = config.hidden_act
else:
_lowerCAmelCase : Any = None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.config.tf_padding:
_lowerCAmelCase : Any = apply_tf_padding(_A ,self.convolution )
_lowerCAmelCase : int = self.convolution(_A )
if self.normalization is not None:
_lowerCAmelCase : Tuple = self.normalization(_A )
if self.activation is not None:
_lowerCAmelCase : Optional[int] = self.activation(_A )
return features
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = MobileNetVaConfig
_UpperCAmelCase = load_tf_weights_in_mobilenet_va
_UpperCAmelCase = "mobilenet_v1"
_UpperCAmelCase = "pixel_values"
_UpperCAmelCase = False
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if isinstance(_A ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A ,nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_lowerCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A = True ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Optional[Any] = config
_lowerCAmelCase : Any = 32
_lowerCAmelCase : List[Any] = max(int(depth * config.depth_multiplier ) ,config.min_depth )
_lowerCAmelCase : Any = MobileNetVaConvLayer(
_A ,in_channels=config.num_channels ,out_channels=_A ,kernel_size=3 ,stride=2 ,)
_lowerCAmelCase : List[str] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_lowerCAmelCase : Tuple = nn.ModuleList()
for i in range(13 ):
_lowerCAmelCase : Any = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_lowerCAmelCase : str = max(int(depth * config.depth_multiplier ) ,config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_A ,in_channels=_A ,out_channels=_A ,kernel_size=3 ,stride=strides[i] ,groups=_A ,) )
self.layer.append(
MobileNetVaConvLayer(
_A ,in_channels=_A ,out_channels=_A ,kernel_size=1 ,) )
_lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_lowerCAmelCase : Any = self.conv_stem(_A )
_lowerCAmelCase : Optional[int] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_lowerCAmelCase : Tuple = layer_module(_A )
if output_hidden_states:
_lowerCAmelCase : int = all_hidden_states + (hidden_states,)
_lowerCAmelCase : Dict = hidden_states
if self.pooler is not None:
_lowerCAmelCase : Union[str, Any] = torch.flatten(self.pooler(_A ) ,start_dim=1 )
else:
_lowerCAmelCase : Tuple = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A ,pooler_output=_A ,hidden_states=_A ,)
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Dict = config.num_labels
_lowerCAmelCase : List[str] = MobileNetVaModel(_A )
_lowerCAmelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_lowerCAmelCase : Dict = nn.Dropout(config.classifier_dropout_prob ,inplace=_A )
_lowerCAmelCase : List[str] = nn.Linear(_A ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Any = self.mobilenet_va(_A ,output_hidden_states=_A ,return_dict=_A )
_lowerCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : Optional[int] = self.classifier(self.dropout(_A ) )
_lowerCAmelCase : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : List[Any] = 'single_label_classification'
else:
_lowerCAmelCase : Tuple = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : Optional[int] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_lowerCAmelCase : Union[str, Any] = loss_fct(_A ,_A )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Union[str, Any] = CrossEntropyLoss()
_lowerCAmelCase : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : Any = BCEWithLogitsLoss()
_lowerCAmelCase : int = loss_fct(_A ,_A )
if not return_dict:
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_A ,logits=_A ,hidden_states=outputs.hidden_states ,)
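# --- Hedged sketch (added; not part of the original file) ---
# The problem_type inference used by the classification head above, in isolation:
def _sketch_problem_type(num_labels, labels_are_int):
    if num_labels == 1:
        return 'regression'
    if labels_are_int:
        return 'single_label_classification'
    return 'multi_label_classification'

# _sketch_problem_type(1, True) -> 'regression'
# _sketch_problem_type(5, True) -> 'single_label_classification'
# _sketch_problem_type(5, False) -> 'multi_label_classification'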
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
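# --- Hedged illustration (added; not part of the original script) ---
# Example renames produced by the function above (input keys are made up):
_sketch_example_renames = {
    'prior.x_out': 'prior.fc_proj_out',           # "prior.x_out" branch
    'vqvae.level_0.k': 'vqvae.level_0.codebook',  # trailing '.k' branch
}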
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            _lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} : \nshape {val.shape} and {value.shape} do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
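

# --- Illustration (not part of the original script) ---
# A standalone sketch of how the regexes above flatten the nested
# "model.X.Y" indices into one block index via block = X * 2 + Y; the
# sample key is hypothetical.
import re as _re

_pat = _re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_groups = _pat.match("encoders.0.level_blocks.1.model.2.1.bias").groups()
assert int(_groups[2]) * 2 + int(_groups[3]) == 5  # 2 * 2 + 1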
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not sentence:
return ""
    _lowerCAmelCase : List[str] = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
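

# --- Usage sketch (not part of the original file) ---
# Assuming the intended behavior is "upper-case the first character only":
# "hello world" -> "Hello world", and a leading digit is left untouched.
_table = dict(zip(ascii_lowercase, ascii_uppercase))
for _s in ("hello world", "123 hello", ""):
    print(repr(_table.get(_s[0], _s[0]) + _s[1:] if _s else ""))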
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
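

# --- Offline sketch (not part of the original file) ---
# The scraper parses a JSON blob out of an inline <script> tag. A
# self-contained illustration on synthetic HTML, with no network access;
# the HTML below is made up.
_html = '<html><script>window._sharedData = {"config": {"user": "demo"}};</script></html>'
_script = BeautifulSoup(_html, 'html.parser').find('script')
_raw = _script.contents[0]
_info = json.loads(_raw[_raw.find('{"config"') : -1])
print(_info['config']['user'])  # demo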
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = AlbertConfig.from_json_file(_lowerCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
_lowerCAmelCase : List[Any] = AlbertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
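

# --- Usage sketch (not part of the original script) ---
# Invoking the converter programmatically instead of via the CLI; the three
# paths below are placeholders, not real files:
#
#     convert_tf_checkpoint_to_pytorch(
#         "albert_base/model.ckpt-best",
#         "albert_base/albert_config.json",
#         "albert_base/pytorch_model.bin",
#     )
#
# Loading the result back mirrors the torch.save call above:
#
#     model = AlbertForPreTraining(AlbertConfig.from_json_file("albert_base/albert_config.json"))
#     model.load_state_dict(torch.load("albert_base/pytorch_model.bin"))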
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
            _lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(c )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                _lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(SPIECE_UNDERLINE ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
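

# --- Illustration (not part of the original file) ---
# The tokenize override above splits pieces like "▁2000," so the trailing
# comma becomes its own token. The triggering check alone, without a real
# SentencePiece model:
for _piece in ("\u25812000,", "\u2581hello"):
    if len(_piece) > 1 and _piece[-1] == "," and _piece[-2].isdigit():
        print(_piece[:-1], ",")  # digits and comma split apart
    else:
        print(_piece)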
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return choice(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = random_pivot(_lowerCamelCase )
# partition based on pivot
# linear time
_lowerCAmelCase : List[str] = [e for e in lst if e < pivot]
_lowerCAmelCase : int = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_lowerCamelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_lowerCamelCase ) < k - 1:
return kth_number(_lowerCamelCase , k - len(_lowerCamelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
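

# --- Standalone sketch (not part of the original file) ---
# A clean quickselect with the same structure as above (1-indexed k;
# distinct elements assumed, since ties fall out of both partitions):
def _kth(lst, k):
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return _kth(big, k - len(small) - 1)
    return _kth(small, k)


assert _kth([3, 1, 4, 5, 9, 2, 6], 3) == 3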
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
            # extend the 16-word block with 48 zero-initialized words for the message schedule
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        _lowerCAmelCase : List[str] = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(_A ).hash ,hashlib.sha256(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
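

# --- Verification sketch (not part of the original file) ---
# Cross-checking any SHA-256 implementation against hashlib on a known
# vector; "abc" is the classic FIPS 180-2 test input.
import hashlib

assert (
    hashlib.sha256(b"abc").hexdigest()
    == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
)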
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCamelCase )
if number < 0:
return False
_lowerCAmelCase : str = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
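

# --- Usage sketch (not part of the original file) ---
# The predicate above tests automorphic numbers: n qualifies when n**2
# ends in the digits of n (5 -> 25, 76 -> 5776; 7 -> 49 does not).
for _n, _expected in ((5, True), (76, True), (7, False), (0, True)):
    assert str(_n * _n).endswith(str(_n)) == _expected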
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
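

# --- Illustration (not part of the original file) ---
# The class above keeps (item, key(item)) pairs plus a position map for
# O(log n) update/delete. The ordering idea alone, shown with heapq;
# negating the key turns the min-heap into a max-heap:
import heapq

_scores = {"a": 3, "b": 1, "c": 2}
_min_heap = [(v, k) for k, v in _scores.items()]
heapq.heapify(_min_heap)
assert heapq.heappop(_min_heap) == (1, "b")  # smallest key first
_max_heap = [(-v, k) for k, v in _scores.items()]
heapq.heapify(_max_heap)
assert heapq.heappop(_max_heap) == (-3, "a")  # largest key first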
| 16 | 1 |
"""simple docstring"""
import os
def lowerCamelCase__ ( ):
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
_lowerCAmelCase : List[str] = str(file.readlines()[0] )
_lowerCAmelCase : Optional[Any] = names.replace('"' , '' ).split(',' )
names.sort()
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Union[str, Any] = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
_lowerCAmelCase : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
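

# --- Worked example (not part of the original file) ---
# Per the Project Euler 22 statement, COLIN scores 3 + 15 + 12 + 9 + 14 = 53
# letters-wise, and 938 * 53 = 49714 once weighted by its sorted position.
assert sum(ord(_c) - 64 for _c in "COLIN") == 53
assert 938 * 53 == 49714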
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
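

# --- Illustration (not part of the original module) ---
# The causal attention mask registered in __init__ above is built by filling
# with a large negative value and zeroing the lower triangle via triu_(1):
_demo_mask = torch.full([4, 4], -10000.0)
_demo_mask.triu_(1)  # strictly-upper entries stay at -10000, the rest become 0
assert _demo_mask[0, 0] == 0 and _demo_mask[0, 1] == -10000.0 and _demo_mask[2, 1] == 0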
| 16 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "data2vec-vision"
def __init__( self ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1E-12 ,_A=224 ,_A=16 ,_A=3 ,_A=False ,_A=False ,_A=False ,_A=False ,_A=0.1 ,_A=0.1 ,_A=True ,_A=[3, 5, 7, 11] ,_A=[1, 2, 3, 6] ,_A=True ,_A=0.4 ,_A=256 ,_A=1 ,_A=False ,_A=255 ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Optional[Any] = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : int = use_relative_position_bias
_lowerCAmelCase : int = use_shared_relative_position_bias
_lowerCAmelCase : Optional[Any] = layer_scale_init_value
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : int = out_indices
_lowerCAmelCase : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Any = auxiliary_loss_weight
_lowerCAmelCase : Union[str, Any] = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : Tuple = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
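

# --- Illustration (not part of the original module) ---
# With the defaults above (image_size=224, patch_size=16) the ViT-style
# backbone sees (224 // 16) ** 2 = 196 patches per image:
assert (224 // 16) ** 2 == 196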
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
                isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _lowerCAmelCase : List[str] = {'dtype': jnp.int64}
            else:
                _lowerCAmelCase : Tuple = {'dtype': jnp.int32}
        elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            _lowerCAmelCase : Any = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
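

# --- Illustration (not part of the original module) ---
# The formatter above picks JAX default dtypes for NumPy inputs
# (int32/float32 unless x64 is enabled). The dtype dispatch alone:
def _default_jnp_dtype(value, x64_enabled=False):
    if np.issubdtype(value.dtype, np.integer):
        return "int64" if x64_enabled else "int32"
    if np.issubdtype(value.dtype, np.floating):
        return "float32"
    return None


assert _default_jnp_dtype(np.arange(3)) == "int32"
assert _default_jnp_dtype(np.ones(2)) == "float32"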
| 16 | 1 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = """PoolFormerConfig"""
# Base docstring
_lowerCAmelCase = """sail/poolformer_s12"""
_lowerCAmelCase = [1, 5_1_2, 7, 7]
# Image classification docstring
_lowerCAmelCase = """sail/poolformer_s12"""
_lowerCAmelCase = """tabby, tabby cat"""
_lowerCAmelCase = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = 0.0 , _lowerCamelCase = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
_lowerCAmelCase : List[str] = 1 - drop_prob
_lowerCAmelCase : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_lowerCAmelCase : str = keep_prob + torch.rand(_lowerCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_lowerCAmelCase : Any = input.div(_lowerCamelCase ) * random_tensor
return output
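

# --- Illustration (not part of the original module) ---
# Stochastic depth as implemented above: at train time each *sample* is
# zeroed with probability drop_prob and survivors are rescaled by
# 1 / (1 - drop_prob), so the expected output matches the input.
_demo_x = torch.ones(8, 3)
_demo_keep = (torch.rand(8, 1) + 0.5).floor_()  # Bernoulli(keep_prob=0.5) per sample
_demo_out = _demo_x / 0.5 * _demo_keep
assert set(_demo_out[:, 0].tolist()) <= {0.0, 2.0}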
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A = None ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = drop_prob
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return drop_path(_A ,self.drop_prob ,self.training )
def __lowerCamelCase ( self ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=None ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = patch_size if isinstance(_A ,collections.abc.Iterable ) else (patch_size, patch_size)
_lowerCAmelCase : Union[str, Any] = stride if isinstance(_A ,collections.abc.Iterable ) else (stride, stride)
_lowerCAmelCase : Optional[Any] = padding if isinstance(_A ,collections.abc.Iterable ) else (padding, padding)
_lowerCAmelCase : List[Any] = nn.Convad(_A ,_A ,kernel_size=_A ,stride=_A ,padding=_A )
_lowerCAmelCase : Any = norm_layer(_A ) if norm_layer else nn.Identity()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.projection(_A )
_lowerCAmelCase : Union[str, Any] = self.norm(_A )
return embeddings
class __UpperCamelCase ( nn.GroupNorm ):
def __init__( self ,_A ,**_A ):
'''simple docstring'''
super().__init__(1 ,_A ,**_A )
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.AvgPoolad(_A ,stride=1 ,padding=pool_size // 2 ,count_include_pad=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.pool(_A ) - hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : str = nn.Convad(_A ,_A ,1 )
_lowerCAmelCase : Optional[Any] = nn.Convad(_A ,_A ,1 )
_lowerCAmelCase : Union[str, Any] = PoolFormerDropPath(_A )
if isinstance(config.hidden_act ,_A ):
_lowerCAmelCase : Optional[int] = ACTaFN[config.hidden_act]
else:
_lowerCAmelCase : str = config.hidden_act
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.conva(_A )
_lowerCAmelCase : Optional[Any] = self.act_fn(_A )
_lowerCAmelCase : List[str] = self.drop(_A )
_lowerCAmelCase : Union[str, Any] = self.conva(_A )
_lowerCAmelCase : Any = self.drop(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = PoolFormerPooling(_A )
_lowerCAmelCase : int = PoolFormerOutput(_A ,_A ,_A ,_A )
_lowerCAmelCase : List[Any] = PoolFormerGroupNorm(_A )
_lowerCAmelCase : Dict = PoolFormerGroupNorm(_A )
# Useful for training neural nets
_lowerCAmelCase : Optional[Any] = PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
_lowerCAmelCase : Any = config.use_layer_scale
if config.use_layer_scale:
_lowerCAmelCase : List[str] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) ,requires_grad=_A )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) ,requires_grad=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.use_layer_scale:
_lowerCAmelCase : Optional[int] = self.pooling(self.before_norm(_A ) )
_lowerCAmelCase : List[str] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_lowerCAmelCase : Union[str, Any] = hidden_states + self.drop_path(_A )
_lowerCAmelCase : Union[str, Any] = ()
_lowerCAmelCase : Optional[int] = self.output(self.after_norm(_A ) )
_lowerCAmelCase : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_lowerCAmelCase : int = hidden_states + self.drop_path(_A )
_lowerCAmelCase : int = (output,) + outputs
return outputs
else:
_lowerCAmelCase : List[Any] = self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
_lowerCAmelCase : int = pooling_output + hidden_states
_lowerCAmelCase : List[str] = ()
# Second residual connection inside the PoolFormerOutput block
_lowerCAmelCase : Tuple = self.drop_path(self.output(self.after_norm(_A ) ) )
_lowerCAmelCase : str = hidden_states + layer_output
_lowerCAmelCase : Union[str, Any] = (output,) + outputs
return outputs
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = config
# stochastic depth decay rule
_lowerCAmelCase : str = [x.item() for x in torch.linspace(0 ,config.drop_path_rate ,sum(config.depths ) )]
# patch embeddings
_lowerCAmelCase : Optional[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] ,stride=config.strides[i] ,padding=config.padding[i] ,num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] ,hidden_size=config.hidden_sizes[i] ,) )
_lowerCAmelCase : Dict = nn.ModuleList(_A )
# Transformer blocks
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Tuple = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_lowerCAmelCase : int = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A ,num_channels=config.hidden_sizes[i] ,pool_size=config.pool_size ,hidden_size=config.hidden_sizes[i] ,intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) ,drop_path=dpr[cur + j] ,) )
blocks.append(nn.ModuleList(_A ) )
_lowerCAmelCase : Tuple = nn.ModuleList(_A )
def __lowerCamelCase ( self ,_A ,_A=False ,_A=True ):
'''simple docstring'''
_lowerCAmelCase : Dict = () if output_hidden_states else None
_lowerCAmelCase : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings ,self.block ) ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = layers
# Get patch embeddings from hidden_states
_lowerCAmelCase : Dict = embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
_lowerCAmelCase : Optional[int] = blk(_A )
_lowerCAmelCase : int = layer_outputs[0]
if output_hidden_states:
_lowerCAmelCase : List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A ,hidden_states=_A )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = PoolFormerConfig
_UpperCAmelCase = "poolformer"
_UpperCAmelCase = "pixel_values"
_UpperCAmelCase = True
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if isinstance(_A ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A ,nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __lowerCamelCase ( self ,_A ,_A=False ):
'''simple docstring'''
if isinstance(_A ,_A ):
_lowerCAmelCase : Any = value
_lowerCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : List[Any] = config
_lowerCAmelCase : int = PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_lowerCAmelCase : List[Any] = self.encoder(
_A ,output_hidden_states=_A ,return_dict=_A ,)
_lowerCAmelCase : Optional[int] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A ,hidden_states=encoder_outputs.hidden_states ,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = nn.Linear(config.hidden_size ,config.hidden_size )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.dense(_A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Optional[int] = config.num_labels
_lowerCAmelCase : Optional[int] = PoolFormerModel(_A )
# Final norm
_lowerCAmelCase : Tuple = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_lowerCAmelCase : Tuple = (
nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Dict = self.poolformer(
_A ,output_hidden_states=_A ,return_dict=_A ,)
_lowerCAmelCase : Tuple = outputs[0]
_lowerCAmelCase : Any = self.classifier(self.norm(_A ).mean([-2, -1] ) )
_lowerCAmelCase : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : str = 'single_label_classification'
else:
_lowerCAmelCase : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase : Tuple = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : Union[str, Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_lowerCAmelCase : List[str] = loss_fct(_A ,_A )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : List[str] = BCEWithLogitsLoss()
_lowerCAmelCase : Any = loss_fct(_A ,_A )
if not return_dict:
_lowerCAmelCase : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A ,logits=_A ,hidden_states=outputs.hidden_states )
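# --- Illustrative sketch (standalone; plain names below are assumptions, since the file above uses placeholder identifiers) ---
# The "stochastic depth decay rule" in the encoder assigns each layer a drop-path
# rate that grows linearly with depth. A minimal check of the linspace trick:
import torch
drop_path_rate, depths = 0.1, [2, 2]  # hypothetical config values
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
print(dpr)  # [0.0, 0.0333..., 0.0666..., 0.1] -- deeper layers drop paths more often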
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
        _lowerCAmelCase : Dict = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
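# --- Illustrative sketch (standalone; unmangled names are assumptions) ---
# The static method above is spherical linear interpolation (slerp) between two
# latents: it walks along the arc between them instead of the straight line.
import torch
from math import acos, sin
def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
# alpha=0 returns x0, alpha=1 returns x1, intermediate values follow the arc:
print(slerp(torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]), 0.5))  # tensor([0.7071, 0.7071])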
| 16 | 1 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_lowerCAmelCase = """main"""
# Default branch name
_lowerCAmelCase = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
_lowerCAmelCase = """aaaaaaa"""
# This commit does not exist, so we should 404.
_lowerCAmelCase = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
_lowerCAmelCase = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def lowerCamelCase__ ( ):
'''simple docstring'''
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def lowerCamelCase__ ( ):
'''simple docstring'''
print('Bonjour!' )
yield
print('Au revoir!' )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class __UpperCamelCase ( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,['labels'] )
self.assertEqual(find_labels(_A ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(_A ) ,['start_positions', 'end_positions'] )
class __UpperCamelCase ( a__ ):
pass
self.assertEqual(find_labels(_A ) ,['labels'] )
@require_tf
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,['labels'] )
self.assertEqual(find_labels(_A ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(_A ) ,['start_positions', 'end_positions'] )
class __UpperCamelCase ( a__ ):
pass
self.assertEqual(find_labels(_A ) ,['labels'] )
@require_flax
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(find_labels(_A ) ,[] )
self.assertEqual(find_labels(_A ) ,[] )
self.assertEqual(find_labels(_A ) ,[] )
class __UpperCamelCase ( a__ ):
pass
self.assertEqual(find_labels(_A ) ,[] )
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
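# --- Illustrative sketch (standalone, hypothetical merge ranks) ---
# One iteration of the merge loop in the `bpe` method above: pick the
# lowest-ranked adjacent pair and fuse every occurrence of it.
def bpe_merge_once(word, ranks):
    if len(word) < 2:
        return word
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
    if best not in ranks:
        return word  # no known merge applies
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return merged
print(bpe_merge_once(["l", "o", "w", "</w>"], {("l", "o"): 0}))  # ['lo', 'w', '</w>']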
| 16 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
_UpperCAmelCase = 42
_UpperCAmelCase = jnp.floataa
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = hidden_states.shape
_lowerCAmelCase : List[str] = jax.image.resize(
_A ,shape=(batch, height * 2, width * 2, channels) ,method='nearest' ,)
_lowerCAmelCase : Tuple = self.conv(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
_UpperCAmelCase = 42
_UpperCAmelCase = jnp.floataa
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.conv(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
_UpperCAmelCase = 42
_UpperCAmelCase = None
_UpperCAmelCase = 0.0
_UpperCAmelCase = None
_UpperCAmelCase = jnp.floataa
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.in_channels if self.out_channels is None else self.out_channels
_lowerCAmelCase : List[str] = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
_lowerCAmelCase : Union[str, Any] = nn.Conv(
_A ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
_lowerCAmelCase : Tuple = nn.Dense(_A ,dtype=self.dtype )
_lowerCAmelCase : str = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
_lowerCAmelCase : List[str] = nn.Dropout(self.dropout_prob )
_lowerCAmelCase : str = nn.Conv(
_A ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
_lowerCAmelCase : Dict = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_lowerCAmelCase : Tuple = None
if use_nin_shortcut:
_lowerCAmelCase : List[str] = nn.Conv(
_A ,kernel_size=(1, 1) ,strides=(1, 1) ,padding='VALID' ,dtype=self.dtype ,)
def __call__( self ,_A ,_A ,_A=True ):
'''simple docstring'''
_lowerCAmelCase : Dict = hidden_states
_lowerCAmelCase : Optional[Any] = self.norma(_A )
_lowerCAmelCase : Optional[Any] = nn.swish(_A )
_lowerCAmelCase : str = self.conva(_A )
_lowerCAmelCase : List[str] = self.time_emb_proj(nn.swish(_A ) )
_lowerCAmelCase : List[Any] = jnp.expand_dims(jnp.expand_dims(_A ,1 ) ,1 )
_lowerCAmelCase : Dict = hidden_states + temb
_lowerCAmelCase : str = self.norma(_A )
_lowerCAmelCase : Optional[int] = nn.swish(_A )
_lowerCAmelCase : int = self.dropout(_A ,_A )
_lowerCAmelCase : Dict = self.conva(_A )
if self.conv_shortcut is not None:
_lowerCAmelCase : Union[str, Any] = self.conv_shortcut(_A )
return hidden_states + residual
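# --- Illustrative sketch (standalone) ---
# The upsampling block above doubles the spatial resolution with
# nearest-neighbour resizing before a 3x3 convolution. Shape check in the
# NHWC layout the flax blocks use:
import jax
import jax.numpy as jnp
x = jnp.zeros((1, 8, 8, 4))  # (batch, height, width, channels)
y = jax.image.resize(x, shape=(1, 16, 16, 4), method="nearest")
print(y.shape)  # (1, 16, 16, 4)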
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
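# --- Illustrative sketch (standalone, hypothetical values) ---
# The FiLM layer above projects a conditioning embedding to per-channel
# (scale, shift) pairs and modulates features as x * (1 + scale) + shift:
import numpy as np
x = np.array([1.0, 2.0])      # features
scale = np.array([0.5, 0.0])  # would come from the conditioning projection
shift = np.array([0.0, -1.0])
print(x * (1 + scale) + shift)  # [1.5, 1.0]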
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCAmelCase : List[Any] = len(_A ) - 1
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCAmelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree ,_A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_A ) ,5 ) == 1
return output_values
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCAmelCase : Tuple = self.basis_function(_A )
_lowerCAmelCase : Optional[Any] = 0.0
_lowerCAmelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __lowerCamelCase ( self ,_A = 0.0_1 ):
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
_lowerCAmelCase : list[float] = [] # x coordinates of points to plot
_lowerCAmelCase : list[float] = [] # y coordinates of points to plot
_lowerCAmelCase : Any = 0.0
while t <= 1:
_lowerCAmelCase : str = self.bezier_curve_function(_A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCAmelCase : Any = [i[0] for i in self.list_of_points]
_lowerCAmelCase : Any = [i[1] for i in self.list_of_points]
plt.plot(
_A ,_A ,color='blue' ,label='Curve of Degree ' + str(self.degree ) ,)
plt.scatter(_A ,_A ,color='red' ,label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
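# --- Illustrative note (standalone) ---
# basis_function above evaluates the Bernstein polynomials
# b(i, n, t) = comb(n, i) * (1 - t) ** (n - i) * t ** i, which sum to 1.
# For a degree-2 curve at t = 0.5 the control-point weights are:
from scipy.special import comb  # already imported above; repeated for self-containment
n, t = 2, 0.5
print([comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)])  # [0.25, 0.5, 0.25]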
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=0.999 , _lowerCamelCase="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCAmelCase : Union[str, Any] = []
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = i / num_diffusion_timesteps
_lowerCAmelCase : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
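# --- Illustrative sketch (standalone) ---
# With the "cosine" transform above, beta_i = 1 - alpha_bar((i+1)/T) / alpha_bar(i/T),
# clipped at max_beta. Reproducing the first few betas for T = 1000:
import math
def alpha_bar_demo(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
T = 1000
print([min(1 - alpha_bar_demo((i + 1) / T) / alpha_bar_demo(i / T), 0.999) for i in range(3)])
# tiny, slowly growing values near t = 0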
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
_UpperCAmelCase = 2
@register_to_config
def __init__( self ,_A = 1000 ,_A = 0.0_0_0_8_5 ,_A = 0.0_1_2 ,_A = "linear" ,_A = None ,_A = "epsilon" ,_A = "linspace" ,_A = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_lowerCAmelCase : str = torch.tensor(_A ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase : Union[str, Any] = torch.linspace(_A ,_A ,_A ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase : str = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_A ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase : Optional[Any] = betas_for_alpha_bar(_A )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_lowerCAmelCase : Optional[int] = 1.0 - self.betas
_lowerCAmelCase : Tuple = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_A ,_A ,_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
if schedule_timesteps is None:
_lowerCAmelCase : Optional[Any] = self.timesteps
_lowerCAmelCase : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1).
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCAmelCase : Tuple = 1 if len(_A ) > 1 else 0
else:
_lowerCAmelCase : int = timestep.cpu().item() if torch.is_tensor(_A ) else timestep
_lowerCAmelCase : Any = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCamelCase ( self ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = self.index_for_timestep(_A )
if self.state_in_first_order:
_lowerCAmelCase : Tuple = self.sigmas[step_index]
else:
_lowerCAmelCase : int = self.sigmas_interpol[step_index]
_lowerCAmelCase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Any = num_inference_steps
_lowerCAmelCase : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCAmelCase : Any = np.linspace(0 ,num_train_timesteps - 1 ,_A ,dtype=_A )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCAmelCase : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : int = (np.arange(0 ,_A ) * step_ratio).round()[::-1].copy().astype(_A )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCAmelCase : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : Tuple = (np.arange(_A ,0 ,-step_ratio )).round().copy().astype(_A )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowerCAmelCase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCAmelCase : List[Any] = torch.from_numpy(np.log(_A ) ).to(_A )
_lowerCAmelCase : Optional[int] = np.interp(_A ,np.arange(0 ,len(_A ) ) ,_A )
_lowerCAmelCase : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(_A ).to(device=_A )
# interpolate sigmas
_lowerCAmelCase : str = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowerCAmelCase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCAmelCase : int = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_A ).startswith('mps' ):
# mps does not support float64
_lowerCAmelCase : int = torch.from_numpy(_A ).to(_A ,dtype=torch.floataa )
else:
_lowerCAmelCase : Any = torch.from_numpy(_A ).to(_A )
# interpolate timesteps
_lowerCAmelCase : List[Any] = self.sigma_to_t(_A ).to(_A ,dtype=timesteps.dtype )
_lowerCAmelCase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowerCAmelCase : List[str] = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowerCAmelCase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCAmelCase : str = defaultdict(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = sigma.log()
# get distribution
_lowerCAmelCase : str = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowerCAmelCase : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowerCAmelCase : List[str] = low_idx + 1
_lowerCAmelCase : Optional[Any] = self.log_sigmas[low_idx]
_lowerCAmelCase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowerCAmelCase : List[Any] = (low - log_sigma) / (low - high)
_lowerCAmelCase : str = w.clamp(0 ,1 )
# transform interpolation to time range
_lowerCAmelCase : int = (1 - w) * low_idx + w * high_idx
_lowerCAmelCase : Optional[int] = t.view(sigma.shape )
return t
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.sample is None
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.index_for_timestep(_A )
# advance index counter by 1
_lowerCAmelCase : str = timestep.cpu().item() if torch.is_tensor(_A ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCAmelCase : Optional[Any] = self.sigmas[step_index]
_lowerCAmelCase : Optional[int] = self.sigmas_interpol[step_index + 1]
_lowerCAmelCase : List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowerCAmelCase : Any = self.sigmas[step_index - 1]
_lowerCAmelCase : List[Any] = self.sigmas_interpol[step_index]
_lowerCAmelCase : int = self.sigmas[step_index]
        # Currently only gamma=0 is supported; this usually works best anyway.
        # Supporting gamma > 0 would require scaling the timestep before
        # passing it to the model, which would be a change in the API.
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCAmelCase : Dict = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : str = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCAmelCase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCAmelCase : Optional[Any] = sigma_interpol - sigma_hat
# store for 2nd order step
_lowerCAmelCase : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowerCAmelCase : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowerCAmelCase : Tuple = sigma_next - sigma_hat
_lowerCAmelCase : int = self.sample
_lowerCAmelCase : str = None
_lowerCAmelCase : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_A ):
# mps does not support float64
_lowerCAmelCase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowerCAmelCase : List[str] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowerCAmelCase : Optional[Any] = self.timesteps.to(original_samples.device )
_lowerCAmelCase : Optional[int] = timesteps.to(original_samples.device )
_lowerCAmelCase : Tuple = [self.index_for_timestep(_A ,_A ) for t in timesteps]
_lowerCAmelCase : List[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCAmelCase : int = sigma.unsqueeze(-1 )
_lowerCAmelCase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
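# --- Illustrative sketch (standalone, hypothetical 3-step schedule) ---
# sigma_to_t above inverts the noise schedule by interpolating linearly in
# log-sigma space between the two nearest tabulated sigmas. Same idea in numpy:
import numpy as np
log_sigmas = np.log(np.array([10.0, 1.0, 0.1]))  # sigmas at t = 0, 1, 2
def sigma_to_t_demo(sigma):
    return np.interp(np.log(sigma), log_sigmas[::-1], np.arange(len(log_sigmas))[::-1])
print(sigma_to_t_demo(np.sqrt(10.0)))  # 0.5 -- halfway between t=0 and t=1 in log space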
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
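# --- Illustrative sketch (standalone, since the names above are placeholders) ---
# The same Gaussian elimination with partial pivoting and back substitution,
# written with plain names so it can be sanity-checked directly:
def gauss_solve(a, b):
    n = len(a)
    m = [row[:] + [b_i] for row, b_i in zip(a, b)]  # augmented matrix
    for col in range(n):
        pivot = max(range(col, n), key=lambda r: abs(m[r][col]))  # partial pivoting
        m[col], m[pivot] = m[pivot], m[col]
        for r in range(col + 1, n):
            ratio = m[r][col] / m[col][col]
            for c in range(col, n + 1):
                m[r][c] -= ratio * m[col][c]
    x = [0.0] * n
    for row in range(n - 1, -1, -1):  # back substitution
        x[row] = (m[row][n] - sum(m[row][c] * x[c] for c in range(row + 1, n))) / m[row][row]
    return x
assert gauss_solve([[1.0, 1.0], [1.0, -1.0]], [3.0, 1.0]) == [2.0, 1.0]  # x + y = 3, x - y = 1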
| 16 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = super().to_dict()
for k, v in d.items():
            if isinstance(v ,GenerationConfig ):
_lowerCAmelCase : Tuple = v.to_dict()
return d
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
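# The hex ranges above are the CJK Unified Ideographs blocks (the base block
# plus extensions) and the CJK Compatibility Ideographs blocks, matching
# BERT's notion of a "Chinese character".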
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
        _lowerCAmelCase : Optional[int] = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    _lowerCAmelCase : Union[str, Any] = list(word_set )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
    _lowerCAmelCase : Optional[Any] = max([len(w ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
            _lowerCAmelCase : str = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
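# `add_sub_symbol` greedily matches the longest LTP-segmented word starting at
# each position and prefixes the trailing WordPiece tokens of that word with
# "##", marking them as subword pieces of one whole word.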
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        _lowerCAmelCase : List[Any] = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    _lowerCAmelCase : int = []
    for i in range(0 , len(lines ) , 100 ):
        _lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    _lowerCAmelCase : Union[str, Any] = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
            _lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        _lowerCAmelCase : Any = add_sub_symbol(input_tokens , chinese_word )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
                if len(token ) == 1 and _is_chinese_char(ord(token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
return ref_ids
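# For every input line, `prepare_ref` records the positions of tokens that are
# "##"-prefixed pieces of a multi-character Chinese word; these reference ids
# are intended for whole-word masking during masked language model training.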
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
    _lowerCAmelCase : int = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
    _lowerCAmelCase : Optional[Any] = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        _lowerCAmelCase : Any = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A = True ,_A = None ,_A = 32 ,_A = True ,_A = 1 / 255 ,_A = True ,_A = True ,_A = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,_A = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,_A = True ,_A=7 ,_A=30 ,_A=400 ,_A=3 ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : List[Any] = do_resize
_lowerCAmelCase : Dict = size if size is not None else {'shortest_edge': 288}
_lowerCAmelCase : str = size_divisor
_lowerCAmelCase : List[str] = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : Dict = do_normalize
_lowerCAmelCase : str = do_center_crop
_lowerCAmelCase : Optional[int] = image_mean
_lowerCAmelCase : Dict = image_std
_lowerCAmelCase : int = do_pad
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : List[str] = min_resolution
_lowerCAmelCase : Dict = max_resolution
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCamelCase ( self ,_A ,_A=False ):
'''simple docstring'''
if not batched:
_lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
_lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(_A ,Image.Image ):
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = image.size
else:
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
            _lowerCAmelCase : Union[str, Any] = size / min(w ,h )
if h < w:
_lowerCAmelCase, _lowerCAmelCase : List[str] = size, scale * w
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = scale * h, size
_lowerCAmelCase : Optional[int] = int((1333 / 800) * size )
            if max(newh ,neww ) > max_size:
                _lowerCAmelCase : Union[str, Any] = max_size / max(newh ,neww )
_lowerCAmelCase : Optional[Any] = newh * scale
_lowerCAmelCase : Any = neww * scale
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCAmelCase : List[Any] = []
for image in image_inputs:
_lowerCAmelCase, _lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            _lowerCAmelCase : Union[str, Any] = max(expected_values ,key=lambda item : item[0] )[0]
            _lowerCAmelCase : List[str] = max(expected_values ,key=lambda item : item[1] )[1]
return expected_height, expected_width
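# `get_expected_values` mirrors the processor's resizing rule: scale the
# shortest edge to `size`, cap the longest edge at (1333 / 800) * size, then
# round both sides down to a multiple of `size_divisor`.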
@require_torch
@require_vision
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BridgeTowerImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A ,'image_mean' ) )
self.assertTrue(hasattr(_A ,'image_std' ) )
self.assertTrue(hasattr(_A ,'do_normalize' ) )
self.assertTrue(hasattr(_A ,'do_resize' ) )
self.assertTrue(hasattr(_A ,'size' ) )
self.assertTrue(hasattr(_A ,'size_divisor' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A ,Image.Image )
# Test not batched input
_lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowerCAmelCase : Optional[int] = image_processing(_A ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ,batched=_A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,np.ndarray )
# Test not batched input
_lowerCAmelCase : int = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowerCAmelCase : str = image_processing(_A ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_A ,batched=_A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_A ,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A ,torch.Tensor )
# Test not batched input
_lowerCAmelCase : int = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowerCAmelCase : List[str] = image_processing(_A ,return_tensors='pt' ).pixel_values
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(_A ,batched=_A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
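    # The components above form a deliberately tiny latent-diffusion pipeline
    # (32-dim UNet/VAE/CLIP text encoder) so the functional test below runs
    # quickly on CPU; seeds are fixed for determinism.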
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
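# This script maps an S3PRL downstream checkpoint onto the matching Hugging
# Face UniSpeechSat head (sequence classification, audio frame classification /
# diarization, or x-vector) by copying the head weights over by name.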
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Any = downstream_dict['projector.weight']
_lowerCAmelCase : Any = downstream_dict['projector.bias']
_lowerCAmelCase : List[Any] = downstream_dict['model.post_net.linear.weight']
_lowerCAmelCase : List[Any] = downstream_dict['model.post_net.linear.bias']
return model
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = downstream_dict['model.linear.weight']
_lowerCAmelCase : int = downstream_dict['model.linear.bias']
return model
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Tuple = downstream_dict['connector.weight']
_lowerCAmelCase : List[Any] = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_lowerCAmelCase : Optional[int] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_lowerCAmelCase : str = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_lowerCAmelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
_lowerCAmelCase : Optional[int] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
_lowerCAmelCase : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
_lowerCAmelCase : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
_lowerCAmelCase : str = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = torch.load(_lowerCamelCase , map_location='cpu' )
_lowerCAmelCase : Dict = checkpoint['Downstream']
_lowerCAmelCase : Union[str, Any] = UniSpeechSatConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase )
_lowerCAmelCase : List[str] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
_lowerCAmelCase : Dict = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
_lowerCAmelCase : int = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('ForXVector' ):
_lowerCAmelCase : Any = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_lowerCAmelCase : int = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 16 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
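# Round-trip sketch: decoding the bytes produced by the encoder above yields
# the original string, e.g. decode(encode('Hello')) == 'Hello'. (Assumption:
# `aaaencode`/`aaadecode` correspond to `base64.a85encode`/`a85decode`.)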
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
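# Open-Llama extends the LLaMA-style configuration with dropout probabilities,
# optional stable/shared embeddings, and an optional RoPE scaling dict that is
# validated in `_rope_scaling_validation` below.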
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "open-llama"
def __init__( self ,_A=10_0000 ,_A=4096 ,_A=1_1008 ,_A=32 ,_A=32 ,_A="silu" ,_A=2048 ,_A=0.0_2 ,_A=1E-6 ,_A=True ,_A=0 ,_A=1 ,_A=2 ,_A=False ,_A=True ,_A=0.1 ,_A=0.1 ,_A=True ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : Optional[int] = rms_norm_eps
_lowerCAmelCase : Tuple = use_cache
_lowerCAmelCase : int = kwargs.pop(
'use_memorry_efficient_attention' ,_A )
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_dropout_prob
_lowerCAmelCase : int = use_stable_embedding
_lowerCAmelCase : int = shared_input_output_embedding
_lowerCAmelCase : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,tie_word_embeddings=_A ,**_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_A ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"""got {self.rope_scaling}""" )
_lowerCAmelCase : Optional[Any] = self.rope_scaling.get('type' ,_A )
_lowerCAmelCase : Tuple = self.rope_scaling.get('factor' ,_A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A ,_A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
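        # If the serialized normalizer disagrees with the requested options
        # (lowercasing, accent stripping, Chinese character handling), rebuild
        # the backend normalizer so the fast tokenizer honors the arguments.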
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
            _lowerCAmelCase : Dict = getattr(normalizers ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
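# M-CTC-T is a CTC-based speech recognition model; besides the usual
# transformer sizes, the config describes the convolutional feature encoder
# and validates that the kernel list matches `num_conv_layers`.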
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "mctct"
def __init__( self ,_A=8065 ,_A=1536 ,_A=36 ,_A=6144 ,_A=4 ,_A=384 ,_A=920 ,_A=1E-5 ,_A=0.3 ,_A="relu" ,_A=0.0_2 ,_A=0.3 ,_A=0.3 ,_A=1 ,_A=0 ,_A=2 ,_A=1 ,_A=0.3 ,_A=1 ,_A=(7,) ,_A=(3,) ,_A=80 ,_A=1 ,_A=None ,_A="sum" ,_A=False ,**_A ,):
'''simple docstring'''
super().__init__(**_A ,pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A )
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Optional[Any] = attention_head_dim
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Dict = layerdrop
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = pad_token_id
_lowerCAmelCase : Any = bos_token_id
_lowerCAmelCase : Union[str, Any] = eos_token_id
_lowerCAmelCase : str = conv_glu_dim
_lowerCAmelCase : int = conv_dropout
_lowerCAmelCase : str = num_conv_layers
_lowerCAmelCase : Union[str, Any] = input_feat_per_channel
_lowerCAmelCase : Any = input_channels
_lowerCAmelCase : Optional[int] = conv_channels
_lowerCAmelCase : str = ctc_loss_reduction
_lowerCAmelCase : str = ctc_zero_infinity
# prevents config testing fail with exporting to json
_lowerCAmelCase : Any = list(_A )
_lowerCAmelCase : List[Any] = list(_A )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
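# The __main__ block below is the worker executed by the tests above: each rank
# builds a tensor whose first dimension depends on its process index, then
# checks that `pad_across_processes` zero-pads it to a common shape, both with
# trailing padding (the default) and leading padding (pad_first=True).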
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
        if isinstance(_A ,Device ):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
        _lowerCAmelCase : int = device if isinstance(_A ,str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(_A ,list ) and column:
            if all(
                isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
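    # `_consolidate` stacks a list of same-shape, same-dtype jax arrays into a
    # single batched array and otherwise returns the column unchanged.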
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(_A ,(str, bytes, type(None )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(_A ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
| 16 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
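# Classic backtracking: at each depth pick one unused element, recurse, then
# undo the choice. For a sequence of length n this prints all n! permutations.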
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : Dict = embeddings_size
_lowerCAmelCase : Optional[Any] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : int = num_labels
_lowerCAmelCase : List[Any] = scope
_lowerCAmelCase : Optional[int] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFRegNetModel(config=_A )
_lowerCAmelCase : Any = model(_A ,training=_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : int = TFRegNetForImageClassification(_A )
_lowerCAmelCase : Any = model(_A ,labels=_A ,training=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFRegNetModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 ,reason='TF does not support backprop for grouped convolutions on CPU.' ,)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(_A )
_lowerCAmelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : List[Any] = model_class(_A )
_lowerCAmelCase : str = model(**self._prepare_for_class(_A ,_A ) ,training=_A )
_lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Tuple = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
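        # `check_equivalence` below runs the model once with return_dict=False
        # and once with return_dict=True and recursively asserts that the two
        # outputs match element-wise.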
def check_equivalence(_A ,_A ,_A ,_A={} ):
_lowerCAmelCase : Optional[int] = model(_A ,return_dict=_A ,**_A )
_lowerCAmelCase : List[str] = model(_A ,return_dict=_A ,**_A ).to_tuple()
def recursive_check(_A ,_A ):
if isinstance(_A ,(List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object ,dict_object ):
                        recursive_check(tuple_iterable_value ,dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
                        all(tf.equal(tuple_object ,dict_object ) ) ,msg=(
'Tuple and dict output are not equal. Difference:'
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) ,)
            recursive_check(tuple_output ,dict_output )
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_A )
_lowerCAmelCase : List[Any] = self._prepare_for_class(_A ,_A )
_lowerCAmelCase : Any = self._prepare_for_class(_A ,_A )
check_equivalence(_A ,_A ,_A )
_lowerCAmelCase : int = self._prepare_for_class(_A ,_A ,return_labels=_A )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_A ,_A ,return_labels=_A )
check_equivalence(_A ,_A ,_A )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_A ,_A )
_lowerCAmelCase : Any = self._prepare_for_class(_A ,_A )
check_equivalence(_A ,_A ,_A ,{'output_hidden_states': True} )
_lowerCAmelCase : Tuple = self._prepare_for_class(_A ,_A ,return_labels=_A )
_lowerCAmelCase : Dict = self._prepare_for_class(_A ,_A ,return_labels=_A )
check_equivalence(_A ,_A ,_A ,{'output_hidden_states': True} )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = TFRegNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : List[str] = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Optional[int] = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : List[str] = model(**_A ,training=_A )
# verify the logits
_lowerCAmelCase : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Optional[int] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] ,_A ,atol=1E-4 )
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
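# The factory below reads the level from its argument or the ACCELERATE_LOG_LEVEL
# environment variable and wraps the stdlib logger in the multi-process adapter.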
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
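# Usage sketch, assuming the factory above (this module mirrors accelerate's
# `logging.get_logger`) is called after `PartialState()`/`Accelerator()` has
# initialized the shared state:
#
#     logger = lowerCamelCase__(__name__, log_level="INFO")
#     logger.info("printed once across all processes", main_process_only=True)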
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase = 2000000 ):
'''simple docstring'''
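    # Sieve of Eratosthenes: a 0 at index i means i is still considered prime.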
    _lowerCAmelCase : Tuple = [0 for i in range(_lowerCamelCase + 1 )]
    _lowerCAmelCase : Tuple = 1
    _lowerCAmelCase : List[str] = 1
    for i in range(2 , int(_lowerCamelCase**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , _lowerCamelCase + 1 , i ):
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Any = 0
for i in range(_lowerCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
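        # Encode each passage as [CLS] question [SEP] title [SEP] text, then pad into a batch.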
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
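        # Rank passages by their relevance logit, then extract the best answer spans
        # from each passage in turn.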
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
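        # Score every (start, end) pair up to ``max_answer_length`` and keep the
        # highest-scoring non-overlapping spans.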
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda x: x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : List[str] = f"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCAmelCase : int = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : Dict = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            _lowerCAmelCase : Any = item.h2.text
            _lowerCAmelCase : Optional[Any] = 'https://www.amazon.in/' + item.h2.a['href']
_lowerCAmelCase : Optional[int] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
_lowerCAmelCase : Any = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
_lowerCAmelCase : Optional[int] = 'Not available'
try:
_lowerCAmelCase : Optional[Any] = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[int] = ''
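            # Discount percentage: (MRP - price) / MRP * 100.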
try:
_lowerCAmelCase : List[str] = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[int] = float('nan' )
except AttributeError:
pass
_lowerCAmelCase : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : Optional[Any] = ' '
_lowerCAmelCase : Tuple = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_lowerCAmelCase = """headphones"""
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
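        # A tiny 1D UNet plus the IPNDM scheduler keeps these fast tests cheap.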
torch.manual_seed(0 )
        _lowerCAmelCase : List[Any] = UNet1DModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
        _lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.float16 )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance == 0:
return {"resistance": sqrt(pow(_lowerCamelCase , 2 ) - pow(_lowerCamelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_lowerCamelCase , 2 ) - pow(_lowerCamelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_lowerCamelCase , 2 ) + pow(_lowerCamelCase , 2 ) )}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( SchedulerCommonTest ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
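        # Run a complete sampling loop with the dummy model and return the final sample.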
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
        assert sample.dtype == torch.float16
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 16 | 1 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_lowerCAmelCase = 2_9_9_7_9_2_4_5_8
# Symbols
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = symbols("""ct x y z""")
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
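    # beta = v / c: the velocity as a fraction of the speed of light.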
if velocity > c:
raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('Speed must be greater than or equal to 1!' )
return velocity / c
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
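    # Lorentz factor: gamma = 1 / sqrt(1 - beta^2).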
return 1 / sqrt(1 - beta(_lowerCamelCase ) ** 2 )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return np.array(
[
[gamma(_lowerCamelCase ), -gamma(_lowerCamelCase ) * beta(_lowerCamelCase ), 0, 0],
[-gamma(_lowerCamelCase ) * beta(_lowerCamelCase ), gamma(_lowerCamelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
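# For example, at v = 0.8c: beta = 0.8 and gamma = 1 / sqrt(1 - 0.64) = 5/3 ≈ 1.6667.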
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if event is None:
_lowerCAmelCase : Optional[Any] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_lowerCamelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_lowerCAmelCase = transform(2_9_9_7_9_2_4_5)
print("""Example of four vector: """)
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
_lowerCAmelCase = {ct: c, x: 1, y: 1, z: 1}
_lowerCAmelCase = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
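    # Map an original OpenAI Jukebox state-dict key onto the transformers naming scheme.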
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
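    # Regexes matching the encoder/decoder/conditioner conv and resnet block keys
    # that need renaming.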
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( ):
'''simple docstring'''
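    # Project Euler 19: count Sundays that fall on the first of a month during the
    # 20th century. 1901-01-06 was the first Sunday, hence day starts at 6.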
_lowerCAmelCase : Tuple = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_lowerCAmelCase : str = 6
_lowerCAmelCase : Any = 1
_lowerCAmelCase : int = 1901
_lowerCAmelCase : Union[str, Any] = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_lowerCAmelCase : Dict = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : Any = day - days_per_month[month - 2]
if month > 12:
year += 1
_lowerCAmelCase : str = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
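    # Pull the user profile out of the JSON blob embedded in the page's <script> tag.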
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |