| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
'''simple docstring'''
from __future__ import annotations
import math
UpperCAmelCase_ : Tuple = '2020.9.26'
UpperCAmelCase_ : List[str] = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane using a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Note: this angle normalisation is kept from the source; it is not a plain
    # degrees-to-radians conversion.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }") | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 11 | 0 |
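The init file above registers its tokenizer through `_LazyModule` so that importing the package does not eagerly import every submodule. The snippet below is a minimal sketch of that lazy-import idea; it assumes nothing about the real `_LazyModule` internals, and the class and attribute names are illustrative only.

```python
# Minimal sketch of a lazy module: attribute access triggers the real import.
# Illustrative only -- not the actual transformers._LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each public symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        submodule = self._symbol_to_module.get(item)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, item)
```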
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: returns 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1)) | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
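The slow test above only checks that each pretrained Flax ALBERT head loads and runs. A minimal usage sketch of the same checkpoint is shown below; it assumes network access and the `flax`/`sentencepiece` extras are installed, and the example sentence is illustrative.

```python
from transformers import AlbertTokenizer, FlaxAlbertModel

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")

inputs = tokenizer("Flax models run on JAX.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768) for albert-base-v2
```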
import os

# Precomputes a list of the 100 first triangular numbers t(n) = n * (n + 1) / 2
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt: a word counts when the sum of its
    letter values (A=1, B=2, ...) is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    triangle_words = [
        value
        for value in [sum(ord(letter) - 64 for letter in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution()) | 721 |
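For clarity, here is a quick self-contained check of the word-value rule the solution relies on: each uppercase letter maps to its alphabet position (`ord(letter) - 64`), and a word counts when the sum is a triangular number. "SKY" is the standard example: 19 + 11 + 25 = 55 = t(10). The helper name below is illustrative.

```python
def word_value(word: str) -> int:
    # Alphabet position of each uppercase letter: A -> 1, B -> 2, ...
    return sum(ord(letter) - 64 for letter in word.upper())


triangular_numbers = {n * (n + 1) // 2 for n in range(1, 101)}
assert word_value("SKY") == 55
assert 55 in triangular_numbers
```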
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = T5Tokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
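The conversion script above interleaves tensorstore reads with the sharding bookkeeping, which makes the core idea hard to see. Below is a small, self-contained sketch of just that bookkeeping, assuming only byte sizes matter for deciding shard boundaries: weights accumulate into a block until the byte budget is exceeded, and a `{"metadata": ..., "weight_map": ...}` index records which shard file holds each key. All names and sizes are illustrative.

```python
import json


def shard_by_size(weight_sizes: dict, max_shard_bytes: int) -> dict:
    shards, current, current_size = [], {}, 0
    for name, num_bytes in weight_sizes.items():
        # Start a new shard once the current one would exceed the budget.
        if current and current_size + num_bytes > max_shard_bytes:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = num_bytes
        current_size += num_bytes
    if current:
        shards.append(current)

    weight_map = {
        name: f"pytorch_model-{idx + 1:05d}-of-{len(shards):05d}.bin"
        for idx, shard in enumerate(shards)
        for name in shard
    }
    return {"metadata": {"total_size": sum(weight_sizes.values())}, "weight_map": weight_map}


index = shard_by_size({"encoder.block.0.weight": 4_000, "decoder.block.0.weight": 6_000}, 8_000)
print(json.dumps(index, indent=2))
```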
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCAmelCase_ : Tuple = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
UpperCAmelCase_ : Optional[Any] = json.load(f)
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int,__A : Optional[Any] ):
return FSMTTokenizer.from_pretrained(__A )
def lowerCamelCase_ ( self : Tuple,__A : Tuple ):
_lowerCamelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(__A ).to(__A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self : int,__A : Optional[Any],__A : Any ):
_lowerCamelCase : Tuple = f'facebook/wmt19-{pair}'
_lowerCamelCase : Optional[int] = self.get_tokenizer(__A )
_lowerCamelCase : Tuple = self.get_model(__A )
_lowerCamelCase : List[str] = bleu_data[pair]["src"]
_lowerCamelCase : int = bleu_data[pair]["tgt"]
_lowerCamelCase : Optional[Any] = tokenizer(__A,return_tensors="pt",truncation=__A,padding="longest" ).to(__A )
_lowerCamelCase : List[str] = model.generate(
input_ids=batch.input_ids,num_beams=8,)
_lowerCamelCase : Any = tokenizer.batch_decode(
__A,skip_special_tokens=__A,clean_up_tokenization_spaces=__A )
_lowerCamelCase : Dict = calculate_bleu(__A,__A )
print(__A )
self.assertGreaterEqual(scores["bleu"],__A ) | 700 |
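The BLEU test above drives the generation path end to end. A minimal usage sketch of the same translation flow outside the test harness might look like the following; it assumes network access to download one of the checkpoints named in the test, and the input sentence is illustrative.

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

checkpoint = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(checkpoint)
model = FSMTForConditionalGeneration.from_pretrained(checkpoint)

batch = tokenizer(["Machine translation is useful."], return_tensors="pt")
generated = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```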
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Smallest cuboid size M such that more than `limit` cuboids with sides up to M
    have an integer shortest path between opposite corners."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the (a, b) pairs with a <= b <= max_cuboid_size and a + b fixed.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
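The non-obvious step in the solution above is the closed-form count of side pairs: for a fixed longest side c and a fixed a + b = s, the number of pairs with 1 <= a <= b <= c is min(c, s // 2) - max(1, s - c) + 1. The brute-force cross-check below (a hypothetical helper, not part of the original file) verifies that identity for small values.

```python
def count_pairs_bruteforce(c: int, s: int) -> int:
    # Count (a, b) with 1 <= a <= b <= c and a + b == s by direct enumeration.
    return sum(1 for a in range(1, c + 1) for b in range(a, c + 1) if a + b == s)


for c in range(1, 20):
    for s in range(2, 2 * c + 1):
        assert count_pairs_bruteforce(c, s) == min(c, s // 2) - max(1, s - c) + 1
```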
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : Union[str, Any],__A : Dict=1_3,__A : List[str]=3_2,__A : Tuple=3,__A : Union[str, Any]=4,__A : Optional[int]=[1_0, 2_0, 3_0, 4_0],__A : List[Any]=[2, 2, 3, 2],__A : int=True,__A : Union[str, Any]=True,__A : str=3_7,__A : Tuple="gelu",__A : Tuple=1_0,__A : Any=0.02,__A : int=["stage2", "stage3", "stage4"],__A : List[str]=[2, 3, 4],__A : int=None,):
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Any = num_stages
_lowerCamelCase : List[Any] = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : int = use_labels
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : List[Any] = out_features
_lowerCamelCase : Any = out_indices
_lowerCamelCase : Optional[Any] = scope
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size],self.num_labels )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels,hidden_sizes=self.hidden_sizes,depths=self.depths,num_stages=self.num_stages,hidden_act=self.hidden_act,is_decoder=__UpperCamelCase,initializer_range=self.initializer_range,out_features=self.out_features,out_indices=self.out_indices,num_labels=self.num_labels,)
def lowerCamelCase_ ( self : List[Any],__A : List[str],__A : List[str],__A : Union[str, Any] ):
_lowerCamelCase : List[str] = ConvNextVaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase : int = model(__UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),)
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : Optional[int],__A : List[Any] ):
_lowerCamelCase : Tuple = ConvNextVaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase : str = model(__UpperCamelCase,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict,__A : Union[str, Any],__A : Any,__A : List[str] ):
_lowerCamelCase : Any = ConvNextVaBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Union[str, Any] = ConvNextVaBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase : List[str] = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = config_and_inputs
_lowerCamelCase : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __snake_case , __snake_case , unittest.TestCase ):
lowerCAmelCase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Tuple = ConvNextVaModelTester(self )
_lowerCamelCase : str = ConfigTester(self,config_class=__UpperCamelCase,has_text_modality=__UpperCamelCase,hidden_size=3_7 )
def lowerCamelCase_ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Any ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowerCamelCase_ ( self : Tuple ):
pass
def lowerCamelCase_ ( self : List[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : List[str] = True
if model_class.__name__ in [
*get_values(__UpperCamelCase ),
*get_values(__UpperCamelCase ),
]:
continue
_lowerCamelCase : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_lowerCamelCase : List[str] = self._prepare_for_class(__UpperCamelCase,__UpperCamelCase,return_labels=__UpperCamelCase )
_lowerCamelCase : Union[str, Any] = model(**__UpperCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : Tuple ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : List[str] = False
_lowerCamelCase : Optional[Any] = True
if (
model_class.__name__
in [*get_values(__UpperCamelCase ), *get_values(__UpperCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCamelCase : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(__UpperCamelCase,__UpperCamelCase,return_labels=__UpperCamelCase )
_lowerCamelCase : Optional[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__UpperCamelCase )
_lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1],__UpperCamelCase )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase_ ( self : Any ):
def check_hidden_states_output(__A : Union[str, Any],__A : Union[str, Any],__A : Optional[int] ):
_lowerCamelCase : Union[str, Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__UpperCamelCase,__UpperCamelCase ) )
_lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : int = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ),expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = True
check_hidden_states_output(__UpperCamelCase,__UpperCamelCase,__UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Any = True
check_hidden_states_output(__UpperCamelCase,__UpperCamelCase,__UpperCamelCase )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase_ ( self : Dict ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = ConvNextVaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : str ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__UpperCamelCase )
_lowerCamelCase : Dict = self.default_image_processor
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Optional[int] = preprocessor(images=__UpperCamelCase,return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : int = model(**__UpperCamelCase )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__UpperCamelCase )
_lowerCamelCase : List[str] = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__UpperCamelCase,atol=1e-4 ) ) | 701 |
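The integration test above checks a single logits slice; a plain inference sketch with the same checkpoint is shown below. It assumes `torch`, `Pillow` and network access, and the image path is illustrative.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

checkpoint = "facebook/convnextv2-tiny-1k-224"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = ConvNextV2ForImageClassification.from_pretrained(checkpoint)

image = Image.open("cat.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```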
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation ("0b..."/"-0b...")."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
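As a quick usage check, the converter above should agree with Python's built-in `bin()`, which uses the same `0b`/`-0b` prefixes (`decimal_to_binary` is the readable name chosen in the cleanup above, not necessarily the original one):

```python
for value in (0, 1, 5, -37, 255):
    assert decimal_to_binary(value) == bin(value)
```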
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the right triangles with integer sides."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter that admits the most integer right triangles."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 702 |
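A small sanity check of `pythagorean_triple`: perimeter 120 is the classic case with exactly three integer right triangles (30-40-50, 20-48-52, 24-45-51), so its count should be 3.

```python
counts = pythagorean_triple(200)
assert counts[120] == 3
```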
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
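The heart of the denoising loop above is the classifier-free guidance update: the unconditional and conditional noise predictions come out of one batched forward pass and are recombined with the guidance scale. A minimal tensor-level sketch of that step, with illustrative shapes, is shown below.

```python
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch stacks the unconditional half first, matching the
    # torch.cat([negative_image_embeds, image_embeds]) ordering in the pipeline.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


guided = apply_cfg(torch.randn(2, 1024, 64), guidance_scale=3.0)
print(guided.shape)  # torch.Size([1, 1024, 64])
```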
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
lowerCAmelCase_ = PegasusConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = """gelu"""
def __init__( self : Optional[int],__A : int,__A : Optional[int]=1_3,__A : List[str]=7,__A : Optional[Any]=True,__A : int=False,__A : int=9_9,__A : Optional[Any]=3_2,__A : str=2,__A : Any=4,__A : Dict=3_7,__A : List[str]=0.1,__A : Optional[Any]=0.1,__A : List[str]=4_0,__A : Tuple=2,__A : Union[str, Any]=1,__A : Optional[int]=0,):
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Optional[int] = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Any = eos_token_id
_lowerCamelCase : List[Any] = pad_token_id
_lowerCamelCase : Union[str, Any] = bos_token_id
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size )
_lowerCamelCase : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ),1 )
_lowerCamelCase : List[Any] = tf.concat([input_ids, eos_tensor],axis=1 )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Any = self.config_cls(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,**self.config_updates,)
_lowerCamelCase : int = prepare_pegasus_inputs_dict(lowercase__,lowercase__,lowercase__ )
return config, inputs_dict
def lowerCamelCase_ ( self : int,__A : List[str],__A : Union[str, Any] ):
_lowerCamelCase : Dict = TFPegasusModel(config=lowercase__ ).get_decoder()
_lowerCamelCase : int = inputs_dict["""input_ids"""]
_lowerCamelCase : Dict = input_ids[:1, :]
_lowerCamelCase : Tuple = inputs_dict["""attention_mask"""][:1, :]
_lowerCamelCase : Optional[Any] = inputs_dict["""head_mask"""]
_lowerCamelCase : Dict = 1
# first forward pass
_lowerCamelCase : Tuple = model(lowercase__,attention_mask=lowercase__,head_mask=lowercase__,use_cache=lowercase__ )
_lowerCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3),config.vocab_size )
_lowerCamelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3),2 ),tf.inta )
# append to next input_ids and
_lowerCamelCase : Optional[int] = tf.concat([input_ids, next_tokens],axis=-1 )
_lowerCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask],axis=-1 )
_lowerCamelCase : Optional[int] = model(lowercase__,attention_mask=lowercase__ )[0]
_lowerCamelCase : Optional[int] = model(lowercase__,attention_mask=lowercase__,past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1],output_from_past.shape[1] )
# select random slice
_lowerCamelCase : str = int(ids_tensor((1,),output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__,lowercase__,rtol=1e-3 )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCamelCase : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCamelCase : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = TFPegasusModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=lowercase__ )
def lowerCamelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowerCAmelCase_ = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase_ = """google/pegasus-xsum"""
@cached_property
def lowerCamelCase_ ( self : Optional[Any] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self : List[Any],**__A : Union[str, Any] ):
_lowerCamelCase : Any = self.translate_src_text(**lowercase__ )
assert self.expected_text == generated_words
def lowerCamelCase_ ( self : Union[str, Any],**__A : Optional[Any] ):
_lowerCamelCase : Dict = self.tokenizer(self.src_text,**lowercase__,padding=lowercase__,return_tensors="tf" )
_lowerCamelCase : Dict = self.model.generate(
model_inputs.input_ids,attention_mask=model_inputs.attention_mask,num_beams=2,use_cache=lowercase__,)
_lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy(),skip_special_tokens=lowercase__ )
return generated_words
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
self._assert_generated_batch_equal_expected() | 703 |
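Outside the test harness, the same checkpoint can be used for summarization roughly as follows; this sketch assumes TensorFlow and network access, and the input text is a shortened version of the test's first source document.

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions."],
    padding=True,
    return_tensors="tf",
)
summary_ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```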
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
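Note that the routine above swaps two independently chosen random positions on every pass, which is not the textbook Fisher-Yates procedure. For reference, a sketch of the standard (Durstenfeld) form, which swaps each position with a uniformly chosen index at or below it, is:

```python
import random


def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates(list(range(10))))
```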
'''simple docstring'''
import numpy
class UpperCAmelCase__ :
def __init__( self : Dict,__A : numpy.ndarray,__A : numpy.ndarray ):
_lowerCamelCase : Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_lowerCamelCase : Dict = numpy.random.rand(
self.input_array.shape[1],4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCamelCase : Dict = numpy.random.rand(
4,3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCamelCase : Any = numpy.random.rand(3,1 )
# Real output values provided.
_lowerCamelCase : Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCamelCase : List[str] = numpy.zeros(output_array.shape )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Any = sigmoid(
numpy.dot(self.input_array,self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCamelCase : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer,self.first_hidden_layer_and_second_hidden_layer_weights,) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCamelCase : Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,self.second_hidden_layer_and_output_layer_weights,) )
return self.layer_between_second_hidden_layer_and_output
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T,2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),)
_lowerCamelCase : Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T,numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),self.second_hidden_layer_and_output_layer_weights.T,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ),)
_lowerCamelCase : int = numpy.dot(
self.input_array.T,numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),self.second_hidden_layer_and_output_layer_weights.T,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ),self.first_hidden_layer_and_second_hidden_layer_weights.T,)
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ),)
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCamelCase_ ( self : Dict,__A : numpy.ndarray,__A : int,__A : bool ):
for iteration in range(1,iterations + 1 ):
_lowerCamelCase : Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCamelCase : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
def lowerCamelCase_ ( self : Optional[int],__A : numpy.ndarray ):
_lowerCamelCase : int = input_arr
_lowerCamelCase : Union[str, Any] = sigmoid(
numpy.dot(self.array,self.input_layer_and_first_hidden_layer_weights ) )
_lowerCamelCase : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer,self.first_hidden_layer_and_second_hidden_layer_weights,) )
_lowerCamelCase : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,self.second_hidden_layer_and_output_layer_weights,) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def A_ ( _lowerCAmelCase : numpy.ndarray ):
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def A_ ( _lowerCAmelCase : numpy.ndarray ):
"""simple docstring"""
return (value) * (1 - (value))
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCamelCase : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCamelCase : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 704 |
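The class above buries the network's shape in long attribute names; the compact sketch below shows the same forward pass in isolation: two sigmoid hidden layers (4 and 3 nodes, as in the original comments) followed by a single sigmoid output node. Weights are random and the input batch is illustrative.

```python
import numpy as np


def sigmoid(x: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-x))


rng = np.random.default_rng(0)
x = np.array([[0, 0, 1], [1, 1, 1]], dtype=np.float64)
w1, w2, w3 = rng.random((3, 4)), rng.random((4, 3)), rng.random((3, 1))

hidden_1 = sigmoid(x @ w1)           # (batch, 4)
hidden_2 = sigmoid(hidden_1 @ w2)    # (batch, 3)
prediction = sigmoid(hidden_2 @ w3)  # (batch, 1)
print(prediction)
```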
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase_ : Any = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase__ :
def __init__( self : Union[str, Any],__A : dict[str, list[str]],__A : str ):
_lowerCamelCase : int = graph
# mapping node to its parent in resulting breadth first tree
_lowerCamelCase : str = {}
_lowerCamelCase : Any = source_vertex
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Union[str, Any] = {self.source_vertex}
_lowerCamelCase : Tuple = None
_lowerCamelCase : List[Any] = [self.source_vertex] # first in first out queue
while queue:
_lowerCamelCase : Any = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__A )
_lowerCamelCase : Any = vertex
queue.append(__A )
def lowerCamelCase_ ( self : List[Any],__A : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
_lowerCamelCase : str = self.parent.get(__A )
if target_vertex_parent is None:
_lowerCamelCase : int = (
f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(__A )
return self.shortest_path(__A ) + f'->{target_vertex}'
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo')) | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = set()
# keep track of all the paths to be checked
_lowerCamelCase : Tuple = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : Tuple = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : Dict = path[-1]
if node not in explored:
_lowerCamelCase : int = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Optional[int] = list(lowercase_ )
new_path.append(lowercase_ )
queue.append(lowercase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowercase_ )
# in case there's no path between the 2 nodes
return []
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : int = [start]
_lowerCamelCase : Tuple = set(lowercase_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : str = {start: 0, target: -1}
while queue:
_lowerCamelCase : Union[str, Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Optional[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowercase_ )
queue.append(lowercase_ )
_lowerCamelCase : Tuple = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4 | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
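        # Decay warm-up: either 1 - (1 + step / inv_gamma) ** -power (when use_ema_warmup) or the (1 + step) / (10 + step) ramp, clamped to [min_decay, decay] below.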
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
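        # Standard EMA update: shadow = decay * shadow + (1 - decay) * param, applied below as s_param -= (1 - decay) * (s_param - param).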
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    lowerCAmelCase_ = field(default=UpperCAmelCase_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCAmelCase_ = field(
default=UpperCAmelCase_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCAmelCase_ = field(default=UpperCAmelCase_ , metadata={'help': 'whether to use adafactor'} )
lowerCAmelCase_ = field(
default=UpperCAmelCase_ , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
lowerCAmelCase_ = field(
default=UpperCAmelCase_ , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
lowerCAmelCase_ = field(default=UpperCAmelCase_ , metadata={'help': 'Dropout probability. Goes into model.config.'} )
lowerCAmelCase_ = field(
default=UpperCAmelCase_ , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
lowerCAmelCase_ = field(
default='linear' , metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , ) | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import math
import qiskit
def A_ ( _lowerCAmelCase : Tuple = 1 , _lowerCAmelCase : str = 1 , _lowerCAmelCase : List[Any] = 1 ):
"""simple docstring"""
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
or isinstance(_lowerCAmelCase , _lowerCAmelCase )
or isinstance(_lowerCAmelCase , _lowerCAmelCase )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(_lowerCAmelCase ) != input_a)
or (math.floor(_lowerCAmelCase ) != input_a)
or (math.floor(_lowerCAmelCase ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
_lowerCamelCase : Any = qiskit.QuantumRegister(4 , "qr" )
_lowerCamelCase : List[str] = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
_lowerCamelCase : Optional[int] = [input_a, input_a, carry_in]
_lowerCamelCase : Union[str, Any] = qiskit.QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowerCAmelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowerCAmelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowerCAmelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowerCAmelCase ) # measure the last two qbits
_lowerCamelCase : int = qiskit.Aer.get_backend("aer_simulator" )
_lowerCamelCase : Optional[Any] = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1000 )
return job.result().get_counts(_lowerCAmelCase )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''') | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
from datetime import datetime as dt
import os
from github import Github
UpperCAmelCase_ : Any = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : int = Github(os.environ["GITHUB_TOKEN"] )
_lowerCamelCase : Tuple = g.get_repo("huggingface/transformers" )
_lowerCamelCase : Dict = repo.get_issues(state="open" )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main() | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
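        # Compare-and-swap pairs that are half the subsequence length apart, then merge each half recursively.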
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
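        # Sort the first half ascending (direction 1) and the second half descending (direction 0), then merge the resulting bitonic sequence.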
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def A_ ( _lowerCAmelCase : NDArray[floataa] , _lowerCAmelCase : NDArray[floataa] , _lowerCAmelCase : list[int] , _lowerCAmelCase : int , ):
"""simple docstring"""
_lowerCamelCase : Tuple = coefficient_matrix.shape
_lowerCamelCase : List[str] = constant_matrix.shape
if rowsa != colsa:
_lowerCamelCase : Optional[Any] = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(__UpperCamelCase )
if colsa != 1:
_lowerCamelCase : Any = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(__UpperCamelCase )
if rowsa != rowsa:
_lowerCamelCase : Union[str, Any] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(__UpperCamelCase )
if len(__UpperCamelCase ) != rowsa:
_lowerCamelCase : Dict = (
'''Number of initial values must be equal to number of rows in coefficient '''
F'matrix but received {len(__UpperCamelCase )} and {rowsa}'
)
raise ValueError(__UpperCamelCase )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
_lowerCamelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
_lowerCamelCase : Union[str, Any] = table.shape
strictly_diagonally_dominant(__UpperCamelCase )
# Iterates the whole matrix for given number of times
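    # Jacobi update: x_i^(k+1) = (b_i - sum_{j != i} a_ij * x_j^(k)) / a_ii, computed only from the previous iterate.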
for _ in range(__UpperCamelCase ):
_lowerCamelCase : str = []
for row in range(__UpperCamelCase ):
_lowerCamelCase : Any = 0
for col in range(__UpperCamelCase ):
if col == row:
_lowerCamelCase : List[str] = table[row][col]
elif col == cols - 1:
_lowerCamelCase : Union[str, Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_lowerCamelCase : List[Any] = (temp + val) / denom
new_val.append(__UpperCamelCase )
_lowerCamelCase : Any = new_val
return [float(__UpperCamelCase ) for i in new_val]
def A_ ( _lowerCAmelCase : NDArray[floataa] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = table.shape
_lowerCamelCase : Union[str, Any] = True
for i in range(0 , __UpperCamelCase ):
_lowerCamelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    exponent = math.loga(math.sqrt(4 * _lowerCAmelCase + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCAmelCase_ : Optional[Any] = 'scheduler_config.json'
class UpperCAmelCase__ ( lowerCAmelCase__ ):
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4
lowerCAmelCase_ = 5
@dataclass
class UpperCAmelCase__ ( lowerCAmelCase__ ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
lowerCAmelCase_ = SCHEDULER_CONFIG_NAME
lowerCAmelCase_ = ["dtype"]
lowerCAmelCase_ = []
lowerCAmelCase_ = True
@classmethod
def lowerCamelCase_ ( cls : int,__A : Optional[Any] = None,__A : Union[str, Any] = None,__A : int=False,**__A : Tuple,):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase,subfolder=_lowerCamelCase,return_unused_kwargs=_lowerCamelCase,**_lowerCamelCase,)
_lowerCamelCase , _lowerCamelCase : str = cls.from_config(_lowerCamelCase,return_unused_kwargs=_lowerCamelCase,**_lowerCamelCase )
if hasattr(_lowerCamelCase,"create_state" ) and getattr(_lowerCamelCase,"has_state",_lowerCamelCase ):
_lowerCamelCase : str = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCamelCase_ ( self : Optional[int],__A : List[Any],__A : Union[str, Any] = False,**__A : Tuple ):
self.save_config(save_directory=_lowerCamelCase,push_to_hub=_lowerCamelCase,**_lowerCamelCase )
@property
def lowerCamelCase_ ( self : Dict ):
return self._get_compatibles()
@classmethod
def lowerCamelCase_ ( cls : List[Any] ):
_lowerCamelCase : int = list(set([cls.__name__] + cls._compatibles ) )
_lowerCamelCase : int = importlib.import_module(__name__.split("." )[0] )
_lowerCamelCase : List[str] = [
getattr(_lowerCamelCase,_lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase,_lowerCamelCase )
]
return compatible_classes
def A_ ( _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : Tuple[int] ):
"""simple docstring"""
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=0.9_9_9 , _lowerCAmelCase : Union[str, Any]=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_lowerCAmelCase : Any ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
_lowerCamelCase : Tuple = []
for i in range(lowerCamelCase_ ):
_lowerCamelCase : Dict = i / num_diffusion_timesteps
_lowerCamelCase : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Dict ):
_lowerCamelCase : Optional[Any] = scheduler.config
if config.trained_betas is not None:
_lowerCamelCase : Dict = jnp.asarray(config.trained_betas,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_lowerCamelCase : int = jnp.linspace(config.beta_start,config.beta_end,config.num_train_timesteps,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCamelCase : Optional[Any] = (
jnp.linspace(
config.beta_start**0.5,config.beta_end**0.5,config.num_train_timesteps,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCamelCase : List[str] = betas_for_alpha_bar(config.num_train_timesteps,dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
_lowerCamelCase : int = 1.0 - betas
_lowerCamelCase : List[Any] = jnp.cumprod(_lowerCamelCase,axis=0 )
return cls(
alphas=_lowerCamelCase,betas=_lowerCamelCase,alphas_cumprod=_lowerCamelCase,)
def A_ ( _lowerCAmelCase : CommonSchedulerState , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray ):
"""simple docstring"""
_lowerCamelCase : List[str] = state.alphas_cumprod
_lowerCamelCase : Optional[int] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[str] = sqrt_alpha_prod.flatten()
_lowerCamelCase : List[Any] = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowerCamelCase : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : List[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowerCamelCase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def A_ ( _lowerCAmelCase : CommonSchedulerState , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowerCamelCase : Tuple = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def A_ ( _lowerCAmelCase : CommonSchedulerState , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : jnp.ndarray ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowerCamelCase : Tuple = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( _A , _A , _A , unittest.TestCase ):
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Any ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4),layers_per_block=2,sample_size=3_2,in_channels=8,out_channels=4,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),cross_attention_dim=3_2,)
_lowerCamelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,)
_lowerCamelCase : Optional[Any] = CLIPTextModel(__A )
_lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase_ ( self : Dict,__A : int,__A : Any=0 ):
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = image.cpu().permute(0,2,3,1 )[0]
_lowerCamelCase : Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
_lowerCamelCase : Dict = torch.manual_seed(__A )
else:
_lowerCamelCase : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Union[str, Any] = self.get_dummy_components()
_lowerCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
_lowerCamelCase : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Dict = self.get_dummy_inputs(__A )
_lowerCamelCase : Optional[Any] = sd_pipe(**__A ).images
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
_lowerCamelCase : Optional[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[int] = self.get_dummy_inputs(__A )
_lowerCamelCase : List[str] = "french fries"
_lowerCamelCase : int = sd_pipe(**__A,negative_prompt=__A )
_lowerCamelCase : Optional[int] = output.images
_lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Optional[int] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__A )
_lowerCamelCase : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Tuple = self.get_dummy_inputs(__A )
_lowerCamelCase : int = [inputs["prompt"]] * 2
_lowerCamelCase : Optional[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
_lowerCamelCase : Tuple = image / 2 + 0.5
_lowerCamelCase : Tuple = image.permute(0,3,1,2 )
_lowerCamelCase : Dict = image.repeat(2,1,1,1 )
_lowerCamelCase : List[Any] = sd_pipe(**__A ).images
_lowerCamelCase : Tuple = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
_lowerCamelCase : str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Any = EulerAncestralDiscreteScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule="scaled_linear" )
_lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
_lowerCamelCase : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Dict = self.get_dummy_inputs(__A )
_lowerCamelCase : Dict = sd_pipe(**__A ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[int] = [round(__A,4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(__A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : str = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[Any] = self.get_dummy_components()
_lowerCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
_lowerCamelCase : List[Any] = VaeImageProcessor(do_resize=__A,do_normalize=__A )
_lowerCamelCase : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__A,input_image_type="pt" ) )[0]
_lowerCamelCase : Dict = components["vae"]
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs_by_type(__A,input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowerCamelCase : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowerCamelCase : Dict = pipe(**__A )[0]
_lowerCamelCase : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A,1e-4,"passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : int,__A : Union[str, Any]=0 ):
_lowerCamelCase : List[Any] = torch.manual_seed(__A )
_lowerCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
_lowerCamelCase : List[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_lowerCamelCase : int = self.get_inputs()
_lowerCamelCase : List[str] = pipe(**__A ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Dict = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",safety_checker=__A )
_lowerCamelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = self.get_inputs()
_lowerCamelCase : Dict = pipe(**__A ).images
_lowerCamelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",safety_checker=__A )
_lowerCamelCase : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_lowerCamelCase : Tuple = self.get_inputs()
_lowerCamelCase : str = pipe(**__A ).images
_lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = 0
def callback_fn(__A : List[Any],__A : Optional[Any],__A : Dict ) -> None:
_lowerCamelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCamelCase : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowerCamelCase : Any = latents[0, -3:, -3:, -1]
_lowerCamelCase : Optional[Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_lowerCamelCase : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowerCamelCase : List[str] = latents[0, -3:, -3:, -1]
_lowerCamelCase : Tuple = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_lowerCamelCase : Any = False
_lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",safety_checker=__A,torch_dtype=torch.floataa )
_lowerCamelCase : Optional[Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_lowerCamelCase : Dict = self.get_inputs()
pipe(**__A,callback=__A,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase_ ( self : Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",safety_checker=__A,torch_dtype=torch.floataa )
_lowerCamelCase : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase : Any = self.get_inputs()
_lowerCamelCase : int = pipe(**__A )
_lowerCamelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCamelCase : Optional[int] = inputs["image"].resize((5_0_4, 5_0_4) )
_lowerCamelCase : List[Any] = "timbrooks/instruct-pix2pix"
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A,safety_checker=__A,)
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[int] = pipe(**__A )
_lowerCamelCase : Any = output.images[0]
_lowerCamelCase : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
_lowerCamelCase : Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = F'{sampling_rate}'
_lowerCamelCase : Dict = "1"
_lowerCamelCase : Optional[Any] = "f32le"
_lowerCamelCase : Union[str, Any] = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
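# Pipe the raw audio bytes through ffmpeg and read back mono float32 PCM at the requested sampling rate.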
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_lowerCamelCase : Any = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
_lowerCamelCase : List[Any] = output_stream[0]
_lowerCamelCase : Optional[int] = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] = "f32le" , ):
"""simple docstring"""
_lowerCamelCase : Dict = F'{sampling_rate}'
_lowerCamelCase : Optional[Any] = "1"
if format_for_conversion == "s16le":
_lowerCamelCase : Tuple = 2
elif format_for_conversion == "f32le":
_lowerCamelCase : List[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
_lowerCamelCase : Any = platform.system()
if system == "Linux":
_lowerCamelCase : Tuple = "alsa"
_lowerCamelCase : Union[str, Any] = "default"
elif system == "Darwin":
_lowerCamelCase : int = "avfoundation"
_lowerCamelCase : Optional[Any] = ":0"
elif system == "Windows":
_lowerCamelCase : List[Any] = "dshow"
_lowerCamelCase : Dict = "default"
_lowerCamelCase : Any = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
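# The chunk size is expressed in bytes: samples per chunk times bytes per sample.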
_lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_lowerCamelCase : List[str] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = None , _lowerCAmelCase : Dict = None , _lowerCAmelCase : int = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
_lowerCamelCase : Optional[Any] = stream_chunk_s
else:
_lowerCamelCase : List[Any] = chunk_length_s
_lowerCamelCase : List[str] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
_lowerCamelCase : int = np.intaa
_lowerCamelCase : Optional[int] = 2
elif format_for_conversion == "f32le":
_lowerCamelCase : Union[str, Any] = np.floataa
_lowerCamelCase : List[str] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
_lowerCamelCase : Optional[int] = chunk_length_s / 6
_lowerCamelCase : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
_lowerCamelCase : Dict = [stride_length_s, stride_length_s]
_lowerCamelCase : List[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_lowerCamelCase : Union[str, Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_lowerCamelCase : List[Any] = datetime.datetime.now()
_lowerCamelCase : str = datetime.timedelta(seconds=a__ )
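# Track wall-clock audio time so chunks that fall too far behind real time can be skipped below.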
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
_lowerCamelCase : List[Any] = np.frombuffer(item["raw"] , dtype=a__ )
_lowerCamelCase : Dict = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_lowerCamelCase : int = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] = False ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = B""
_lowerCamelCase , _lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
_lowerCamelCase : Union[str, Any] = 0
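# Accumulate raw bytes and emit fixed-size chunks whose edges overlap by (stride_left, stride_right) bytes.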
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
_lowerCamelCase : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
_lowerCamelCase : Optional[Any] = (_stride_left, stride_right)
_lowerCamelCase : Dict = {"raw": acc[:chunk_len], "stride": stride}
if stream:
_lowerCamelCase : List[str] = False
yield item
_lowerCamelCase : List[str] = stride_left
_lowerCamelCase : Any = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
_lowerCamelCase : Optional[int] = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
_lowerCamelCase : Optional[Any] = False
yield item
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
_lowerCamelCase : int = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
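# Halve the bracketing interval until it is narrower than 0.01, keeping the sign change (and thus the root) inside it.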
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
lowerCAmelCase_ = 'cvt'
def __init__( self : Any,__A : List[str]=3,__A : Any=[7, 3, 3],__A : Optional[Any]=[4, 2, 2],__A : Union[str, Any]=[2, 1, 1],__A : List[str]=[6_4, 1_9_2, 3_8_4],__A : int=[1, 3, 6],__A : Any=[1, 2, 1_0],__A : Optional[int]=[4.0, 4.0, 4.0],__A : List[Any]=[0.0, 0.0, 0.0],__A : Tuple=[0.0, 0.0, 0.0],__A : Any=[0.0, 0.0, 0.1],__A : Any=[True, True, True],__A : Any=[False, False, True],__A : Dict=["dw_bn", "dw_bn", "dw_bn"],__A : Optional[Any]=[3, 3, 3],__A : Dict=[1, 1, 1],__A : List[str]=[2, 2, 2],__A : Dict=[1, 1, 1],__A : int=[1, 1, 1],__A : Any=0.02,__A : Optional[Any]=1e-12,**__A : Union[str, Any],):
super().__init__(**__A )
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Tuple = patch_sizes
_lowerCamelCase : List[Any] = patch_stride
_lowerCamelCase : Union[str, Any] = patch_padding
_lowerCamelCase : Any = embed_dim
_lowerCamelCase : str = num_heads
_lowerCamelCase : int = depth
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Tuple = attention_drop_rate
_lowerCamelCase : Optional[int] = drop_rate
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : int = qkv_bias
_lowerCamelCase : Union[str, Any] = cls_token
_lowerCamelCase : Optional[Any] = qkv_projection_method
_lowerCamelCase : List[str] = kernel_qkv
_lowerCamelCase : List[str] = padding_kv
_lowerCamelCase : List[Any] = stride_kv
_lowerCamelCase : List[Any] = padding_q
_lowerCamelCase : Union[str, Any] = stride_q
_lowerCamelCase : int = initializer_range
_lowerCamelCase : int = layer_norm_eps | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] = 0 ):
"""simple docstring"""
_lowerCamelCase : Any = length or len(_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = False
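# One pass of adjacent swaps; if anything moved, recurse on a prefix that is one element shorter.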
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_lowerCamelCase , _lowerCamelCase : List[Any] = list_data[i + 1], list_data[i]
_lowerCamelCase : Optional[Any] = True
return list_data if not swapped else bubble_sort(_UpperCAmelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
class UpperCAmelCase__ :
def __init__( self : Union[str, Any],__A : Dict ):
_lowerCamelCase : List[str] = metric_id
class UpperCAmelCase__ :
lowerCAmelCase_ = [MetricMock(UpperCamelCase_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def lowerCamelCase_ ( self : str ):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
if "tmp_path" in args:
_lowerCamelCase : Dict = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(_lowerCAmelCase , match="https://huggingface.co/docs/evaluate" ):
func(*_lowerCAmelCase ) | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ : int = 'hf-internal-testing/tiny-random-bert'
UpperCAmelCase_ : int = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
UpperCAmelCase_ : Dict = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = cached_file(__A,__A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__A,__A ) ) )
with open(os.path.join(__A,"refs","main" ) ) as f:
_lowerCamelCase : Tuple = f.read()
self.assertEqual(__A,os.path.join(__A,"snapshots",__A,__A ) )
self.assertTrue(os.path.isfile(__A ) )
# File is cached at the same place the second time.
_lowerCamelCase : Any = cached_file(__A,__A )
self.assertEqual(__A,__A )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : Union[str, Any] = cached_file(__A,__A,revision="9b8c223" )
self.assertEqual(__A,os.path.join(__A,"snapshots",__A,__A ) )
def lowerCamelCase_ ( self : Optional[Any] ):
with self.assertRaisesRegex(__A,"is not a valid model identifier" ):
_lowerCamelCase : Tuple = cached_file("tiny-random-bert",__A )
with self.assertRaisesRegex(__A,"is not a valid git identifier" ):
_lowerCamelCase : Optional[int] = cached_file(__A,__A,revision="aaaa" )
with self.assertRaisesRegex(__A,"does not appear to have a file named" ):
_lowerCamelCase : List[Any] = cached_file(__A,"conf" )
def lowerCamelCase_ ( self : Dict ):
with self.assertRaisesRegex(__A,"does not appear to have a file named" ):
_lowerCamelCase : Union[str, Any] = cached_file(__A,"conf" )
with open(os.path.join(__A,"refs","main" ) ) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__A,".no_exist",__A,"conf" ) ) )
_lowerCamelCase : Optional[int] = cached_file(__A,"conf",_raise_exceptions_for_missing_entries=__A )
self.assertIsNone(__A )
_lowerCamelCase : int = cached_file(__A,"conf",local_files_only=__A,_raise_exceptions_for_missing_entries=__A )
self.assertIsNone(__A )
_lowerCamelCase : Dict = mock.Mock()
_lowerCamelCase : Union[str, Any] = 5_0_0
_lowerCamelCase : int = {}
_lowerCamelCase : str = HTTPError
_lowerCamelCase : Union[str, Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request",return_value=__A ) as mock_head:
_lowerCamelCase : Optional[Any] = cached_file(__A,"conf",_raise_exceptions_for_connection_errors=__A )
self.assertIsNone(__A )
# This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self : Optional[Any] ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
def lowerCamelCase_ ( self : Tuple ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased","ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__A,"is not a valid model identifier" ):
get_file_from_repo("bert-base-case",__A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__A,"is not a valid git identifier" ):
get_file_from_repo("bert-base-cased",__A,revision="ahaha" )
_lowerCamelCase : Any = get_file_from_repo("bert-base-cased",__A )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase : List[str] = json.loads(open(__A,"r" ).read() )
self.assertEqual(config["hidden_size"],7_6_8 )
def lowerCamelCase_ ( self : str ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Any = Path(__A ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(__A,"a.txt" ),str(__A ) )
self.assertIsNone(get_file_from_repo(__A,"b.txt" ) ) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( __snake_case , unittest.TestCase ):
lowerCAmelCase_ = TransfoXLTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : str ):
super().setUp()
_lowerCamelCase : Dict = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase_ ( self : int,**__A : str ):
_lowerCamelCase : List[str] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname,**_lowercase )
def lowerCamelCase_ ( self : Optional[Any],__A : List[str] ):
_lowerCamelCase : Optional[int] = """<unk> UNwanted , running"""
_lowerCamelCase : str = """<unk> unwanted, running"""
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Tuple = TransfoXLTokenizer(vocab_file=self.vocab_file,lower_case=_lowercase )
_lowerCamelCase : Optional[int] = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(_lowercase,["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ),[0, 4, 8, 7] )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = TransfoXLTokenizer(lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ),["hello", "!", "how", "are", "you", "?"] )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Tuple = TransfoXLTokenizer(lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ),["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = TransfoXLTokenizer(lower_case=_lowercase )
_lowerCamelCase : Union[str, Any] = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
_lowerCamelCase : Optional[int] = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(_lowercase ),_lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(_lowercase ),_lowercase )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : int = len(_lowercase )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1",1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_lowercase ),original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ),[1] )
self.assertEqual(tokenizer.decode([1] ),"new1" )
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_lowerCamelCase : Union[str, Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase_ )
_lowerCamelCase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase_ , output_loading_info=lowercase_ )
else:
_lowerCamelCase : int = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase_ )
_lowerCamelCase : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowercase_ , output_loading_info=lowercase_ )
_lowerCamelCase : Any = ["""key_proj""", """value_proj""", """query_proj"""]
_lowerCamelCase : Optional[int] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split("." )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'{attribute} is initialized' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , "in_proj_weight" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(F'{key} was not correctly initialized!' )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
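# Depth-limited minimax over a perfect binary game tree whose leaf scores are given as a flat list.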
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    # The maximiser and minimiser alternate levels, so the recursive calls flip is_max.
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F'Optimal value : {minimax(0 , 0 , True , scores , height )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class FlaxAlbertModelIntegrationTest( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
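# Converts an original CompVis-style latent-diffusion UNet checkpoint into the diffusers UNet2DModel layout and, when possible, bundles it into an LDMPipeline.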
def shave_segments( path , n_shave_prefix_segments=1 ):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split("." )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCamelCase : Optional[Any] = old_checkpoint[path]
_lowerCamelCase : int = old_tensor.shape[0] // 3
_lowerCamelCase : Optional[int] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCamelCase : str = old_tensor.shape[0] // config["num_head_channels"] // 3
_lowerCamelCase : Union[str, Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = old_tensor.split(channels // num_heads , dim=1 )
_lowerCamelCase : Optional[Any] = query.reshape(_lowerCamelCase )
_lowerCamelCase : int = key.reshape(_lowerCamelCase )
_lowerCamelCase : str = value.reshape(_lowerCamelCase )
for path in paths:
_lowerCamelCase : int = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCamelCase : Union[str, Any] = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
_lowerCamelCase : Optional[Any] = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
_lowerCamelCase : Any = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCamelCase : Any = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCamelCase : Optional[Any] = old_checkpoint[path["old"]][:, :, 0]
else:
_lowerCamelCase : List[str] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint( checkpoint , config ):
"""simple docstring"""
_lowerCamelCase : str = {}
_lowerCamelCase : Dict = checkpoint["time_embed.0.weight"]
_lowerCamelCase : Tuple = checkpoint["time_embed.0.bias"]
_lowerCamelCase : Tuple = checkpoint["time_embed.2.weight"]
_lowerCamelCase : List[str] = checkpoint["time_embed.2.bias"]
_lowerCamelCase : Optional[int] = checkpoint["input_blocks.0.0.weight"]
_lowerCamelCase : Optional[int] = checkpoint["input_blocks.0.0.bias"]
_lowerCamelCase : Any = checkpoint["out.0.weight"]
_lowerCamelCase : Optional[Any] = checkpoint["out.0.bias"]
_lowerCamelCase : Any = checkpoint["out.2.weight"]
_lowerCamelCase : Union[str, Any] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
_lowerCamelCase : List[str] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
_lowerCamelCase : Dict = {
layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCamelCase : Dict = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
_lowerCamelCase : List[Any] = {
layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCamelCase : Dict = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
_lowerCamelCase : Dict = {
layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
for layer_id in range(_lowerCamelCase )
}
for i in range(1 , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = (i - 1) // (config["num_res_blocks"] + 1)
_lowerCamelCase : Dict = (i - 1) % (config["num_res_blocks"] + 1)
_lowerCamelCase : Union[str, Any] = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
_lowerCamelCase : Any = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
if F'input_blocks.{i}.0.op.weight' in checkpoint:
_lowerCamelCase : str = checkpoint[
F'input_blocks.{i}.0.op.weight'
]
_lowerCamelCase : Tuple = checkpoint[
F'input_blocks.{i}.0.op.bias'
]
continue
_lowerCamelCase : str = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase : int = {"old": F'input_blocks.{i}.0', "new": F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
_lowerCamelCase : Optional[Any] = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path, resnet_op] , config=_lowerCamelCase )
if len(_lowerCamelCase ):
_lowerCamelCase : List[Any] = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase : List[str] = {
"old": F'input_blocks.{i}.1',
"new": F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_lowerCamelCase : Tuple = {
F'input_blocks.{i}.1.qkv.bias': {
"key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'input_blocks.{i}.1.qkv.weight': {
"key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=_lowerCamelCase , config=_lowerCamelCase , )
_lowerCamelCase : str = middle_blocks[0]
_lowerCamelCase : Optional[int] = middle_blocks[1]
_lowerCamelCase : Dict = middle_blocks[2]
_lowerCamelCase : Dict = renew_resnet_paths(_lowerCamelCase )
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
_lowerCamelCase : Tuple = renew_resnet_paths(_lowerCamelCase )
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
_lowerCamelCase : Optional[int] = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase : int = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , attention_paths_to_split=_lowerCamelCase , config=_lowerCamelCase )
for i in range(_lowerCamelCase ):
_lowerCamelCase : Optional[int] = i // (config["num_res_blocks"] + 1)
_lowerCamelCase : List[str] = i % (config["num_res_blocks"] + 1)
_lowerCamelCase : int = [shave_segments(_lowerCamelCase , 2 ) for name in output_blocks[i]]
_lowerCamelCase : Tuple = {}
for layer in output_block_layers:
_lowerCamelCase , _lowerCamelCase : str = layer.split("." )[0], shave_segments(_lowerCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_lowerCamelCase )
else:
_lowerCamelCase : str = [layer_name]
if len(_lowerCamelCase ) > 1:
_lowerCamelCase : Optional[Any] = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
_lowerCamelCase : Tuple = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
_lowerCamelCase : Tuple = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase : int = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase : Optional[int] = {"old": F'output_blocks.{i}.0', "new": F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCamelCase : List[str] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
_lowerCamelCase : Optional[int] = checkpoint[
F'output_blocks.{i}.{index}.conv.weight'
]
_lowerCamelCase : List[Any] = checkpoint[
F'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(_lowerCamelCase ) == 2:
_lowerCamelCase : str = []
if len(_lowerCamelCase ):
_lowerCamelCase : Dict = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase : List[str] = {
"old": F'output_blocks.{i}.1',
"new": F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_lowerCamelCase : Any = {
F'output_blocks.{i}.1.qkv.bias': {
"key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'output_blocks.{i}.1.qkv.weight': {
"key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=_lowerCamelCase , )
else:
_lowerCamelCase : Union[str, Any] = renew_resnet_paths(_lowerCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCamelCase : int = ".".join(["output_blocks", str(_lowerCamelCase ), path["old"]] )
_lowerCamelCase : Union[str, Any] = ".".join(["up_blocks", str(_lowerCamelCase ), "resnets", str(_lowerCamelCase ), path["new"]] )
_lowerCamelCase : str = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path) | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
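# Shards a T5X/Flax Switch Transformers checkpoint into PyTorch weight files on the fly, so the full model never has to be materialised in memory at once.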
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
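    # Group the flattened tensorstore entries by the real layer they belong to, so each weight can be opened and converted one at a time.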
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = T5Tokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
def lowerCamelCase_ ( self : Any ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[Any] = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = self._create_example_records()
_lowerCamelCase : Union[str, Any] = Dataset.from_list(__A )
self.assertListEqual(dset.column_names,["col_1", "col_2"] )
for i, r in enumerate(__A ):
self.assertDictEqual(__A,example_records[i] )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = self._create_example_records()
_lowerCamelCase : int = Dataset.from_list(__A )
_lowerCamelCase : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info,dset_from_dict.info )
def lowerCamelCase_ ( self : Tuple ): # checks what happens with missing columns
_lowerCamelCase : List[str] = [{"col_1": 1}, {"col_2": "x"}]
_lowerCamelCase : Tuple = Dataset.from_list(__A )
self.assertDictEqual(dset[0],{"col_1": 1} )
self.assertDictEqual(dset[1],{"col_1": None} ) # NB: first record is used for columns
def lowerCamelCase_ ( self : Optional[int] ): # checks if the type can be inferred from the second record
_lowerCamelCase : List[Any] = [{"col_1": []}, {"col_1": [1, 2]}]
_lowerCamelCase : Optional[int] = Dataset.from_list(__A )
self.assertEqual(dset.info.features["col_1"],Sequence(Value("int64" ) ) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = Dataset.from_list([] )
self.assertEqual(len(__A ),0 )
self.assertListEqual(dset.column_names,[] ) | 700 |
'''simple docstring'''
from math import sqrt
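# Project Euler problem 86: find the smallest cuboid size M for which the number of cuboids with an integer shortest-path solution first exceeds the given limit.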
def solution( limit : int = 1000000 ) -> int:
    """simple docstring"""
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
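# Partition a list of integers into two subsets whose sums are as close as possible and return the minimum possible difference (subset-sum dynamic programming).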
def find_min( arr ):
    """simple docstring"""
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i elements sums to exactly j.
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(0 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 701 |
'''simple docstring'''
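# Convert a signed integer to its binary string representation, e.g. 8 -> "0b1000" and -8 -> "-0b1000".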
def decimal_to_binary( num : int ) -> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
def lowerCamelCase_ ( self : Any ):
super().setUp()
_lowerCamelCase : Any = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
_lowerCamelCase : str = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Union[str, Any] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
_lowerCamelCase : str = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : List[str],**__A : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Tuple,__A : Tuple ):
_lowerCamelCase : Union[str, Any] = 'adapt act apte'
_lowerCamelCase : List[Any] = 'adapt act apte'
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = BlenderbotSmallTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Dict = 'adapt act apte'
_lowerCamelCase : str = ['adapt', 'act', 'ap@@', 'te']
_lowerCamelCase : Dict = tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowerCamelCase : Optional[int] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
_lowerCamelCase : Any = 'I am a small frog.'
_lowerCamelCase : List[str] = tok([src_text],padding=__A,truncation=__A )['input_ids']
_lowerCamelCase : Union[str, Any] = tok.batch_decode(__A,skip_special_tokens=__A,clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
_lowerCamelCase : Optional[int] = 'I am a small frog .'
_lowerCamelCase : Any = '.'
_lowerCamelCase : Optional[int] = tok(__A )['input_ids']
_lowerCamelCase : Any = tok(__A )['input_ids']
assert encoded[-1] == encoded_dot[0] | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline( DiffusionPipeline ):
    def __init__( self,prior : PriorTransformer,image_encoder : CLIPVisionModel,image_processor : CLIPImageProcessor,scheduler : HeunDiscreteScheduler,renderer : ShapERenderer,):
        super().__init__()
        self.register_modules(
            prior=prior,image_encoder=image_encoder,image_processor=image_processor,scheduler=scheduler,renderer=renderer,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
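        # Repeat the image embedding once per requested image so the batch size matches num_images_per_prompt.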
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
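        # As in the other diffusers pipelines, a guidance scale above 1.0 enables classifier-free guidance.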
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
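# Add two integers using only bitwise operations: XOR sums the bits without carrying, while AND followed by a left shift produces the carry.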
def add( first : int , second : int ) -> int:
    """simple docstring"""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''') | 703 |
'''simple docstring'''
import random
from typing import Any
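# Naive random shuffle: repeatedly pick two random indices and swap them in place (a simplified variant of the Fisher-Yates shuffle).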
def fisher_yates_shuffle( data : list ) -> list:
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
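# Top-down merge sort: split the list in half, sort each half recursively, then merge the two sorted halves with a generator.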
def merge_sort( collection : list ) -> list:
    """simple docstring"""
    def merge( left : list , right : list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',') | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
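# Equated Monthly Instalment: EMI = P * r * (1 + r)**n / ((1 + r)**n - 1), where r is the monthly interest rate and n the number of monthly payments.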
def equated_monthly_installments( principal : float , rate_per_annum : float , years_to_repay : int ) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
UpperCAmelCase_ : Tuple = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
UpperCAmelCase_ : str = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any],__A : Optional[Any],__A : List[str],__A : Any="<s>",__A : Dict="</s>",__A : int="</s>",__A : List[Any]="<s>",__A : Any="<unk>",__A : str="<pad>",__A : Any="<mask>",**__A : Optional[Any],):
super().__init__(
bos_token=a_,eos_token=a_,unk_token=a_,sep_token=a_,cls_token=a_,pad_token=a_,mask_token=a_,**a_,)
_lowerCamelCase : Optional[int] = vocab_file
_lowerCamelCase : Any = merges_file
_lowerCamelCase : Any = {}
_lowerCamelCase : Any = 0
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : Tuple = 2
_lowerCamelCase : List[str] = 3
self.add_from_file(a_ )
_lowerCamelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(a_,encoding="utf-8" ) as merges_handle:
_lowerCamelCase : Optional[Any] = merges_handle.read().split("\n" )[:-1]
_lowerCamelCase : Tuple = [tuple(merge.split()[:-1] ) for merge in merges]
_lowerCamelCase : Tuple = dict(zip(a_,range(len(a_ ) ) ) )
_lowerCamelCase : List[str] = {}
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : Union[str, Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
_lowerCamelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : int = None,__A : Dict = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_,token_ids_a=a_,already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def lowerCamelCase_ ( self : Any,__A : int,__A : str = None ):
_lowerCamelCase : Optional[int] = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return len(self.encoder )
def lowerCamelCase_ ( self : str ):
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase_ ( self : Tuple,__A : List[str] ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase : Tuple = tuple(a_ )
_lowerCamelCase : int = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_lowerCamelCase : List[Any] = get_pairs(a_ )
if not pairs:
return token
while True:
_lowerCamelCase : Tuple = min(a_,key=lambda __A : self.bpe_ranks.get(a_,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase : Any = bigram
_lowerCamelCase : int = []
_lowerCamelCase : Optional[Any] = 0
while i < len(a_ ):
try:
_lowerCamelCase : Tuple = word.index(a_,a_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase : Dict = j
if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase : Optional[Any] = tuple(a_ )
_lowerCamelCase : List[Any] = new_word
if len(a_ ) == 1:
break
else:
_lowerCamelCase : Union[str, Any] = get_pairs(a_ )
_lowerCamelCase : Tuple = "@@ ".join(a_ )
_lowerCamelCase : Union[str, Any] = word[:-4]
_lowerCamelCase : Any = word
return word
def lowerCamelCase_ ( self : Tuple,__A : Optional[Any] ):
_lowerCamelCase : str = []
_lowerCamelCase : Dict = re.findall(r"\S+\n?",a_ )
for token in words:
split_tokens.extend(list(self.bpe(a_ ).split(" " ) ) )
return split_tokens
def lowerCamelCase_ ( self : Optional[int],__A : Optional[Any] ):
return self.encoder.get(a_,self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : List[Any],__A : Dict ):
return self.decoder.get(a_,self.unk_token )
def lowerCamelCase_ ( self : List[Any],__A : List[str] ):
_lowerCamelCase : Any = " ".join(a_ ).replace("@@ ","" ).strip()
return out_string
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : str = None ):
if not os.path.isdir(a_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Optional[int] = os.path.join(
a_,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Optional[Any] = os.path.join(
a_,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file,a_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(a_ ):
copyfile(self.merges_file,a_ )
return out_vocab_file, out_merge_file
def lowerCamelCase_ ( self : Optional[int],__A : Any ):
if isinstance(a_,a_ ):
try:
with open(a_,"r",encoding="utf-8" ) as fd:
self.add_from_file(a_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
_lowerCamelCase : Tuple = f.readlines()
for lineTmp in lines:
_lowerCamelCase : Dict = lineTmp.strip()
_lowerCamelCase : Optional[Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
_lowerCamelCase : Any = line[:idx]
_lowerCamelCase : Optional[Any] = len(self.encoder ) | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
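# Seed Python's random, NumPy and PyTorch (CPU and all CUDA devices) for reproducible runs.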
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
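# Exponential moving average (EMA) of model parameters, with an optional warmup schedule and save/load helpers.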
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
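# Fast unit tests for the Shap-E text-to-3D pipeline, built from tiny randomly initialised dummy components.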
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = ShapEPipeline
lowerCAmelCase_ = ['prompt']
lowerCAmelCase_ = ['prompt']
lowerCAmelCase_ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCAmelCase_ = False
@property
def lowerCamelCase_ ( self : Optional[int] ):
return 3_2
@property
def lowerCamelCase_ ( self : List[Any] ):
return 3_2
@property
def lowerCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return 8
@property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_lowerCamelCase : Tuple = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,)
return CLIPTextModelWithProjection(__A )
@property
def lowerCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 1_6,
"embedding_dim": self.time_input_dim,
"num_embeddings": 3_2,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_lowerCamelCase : Any = PriorTransformer(**__A )
return model
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
"param_shapes": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 1_2,
"background": (
0.1,
0.1,
0.1,
),
}
_lowerCamelCase : Any = ShapERenderer(**__A )
return model
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Tuple = self.dummy_prior
_lowerCamelCase : Tuple = self.dummy_text_encoder
_lowerCamelCase : List[str] = self.dummy_tokenizer
_lowerCamelCase : Dict = self.dummy_renderer
_lowerCamelCase : Optional[int] = HeunDiscreteScheduler(
beta_schedule="exp",num_train_timesteps=1_0_2_4,prediction_type="sample",use_karras_sigmas=__A,clip_sample=__A,clip_sample_range=1.0,)
_lowerCamelCase : Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCamelCase_ ( self : Dict,__A : Any,__A : str=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : int = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : Optional[Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 3_2,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = "cpu"
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : Optional[int] = self.pipeline_class(**__A )
_lowerCamelCase : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Dict = pipe(**self.get_dummy_inputs(__A ) )
_lowerCamelCase : str = output.images[0]
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowerCamelCase : Any = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : Union[str, Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = torch_device == "cpu"
_lowerCamelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2,test_max_difference=__A,relax_max_difference=__A,)
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = self.pipeline_class(**__A )
_lowerCamelCase : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 2
_lowerCamelCase : List[str] = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
_lowerCamelCase : Optional[int] = batch_size * [inputs[key]]
_lowerCamelCase : Tuple = pipe(**__A,num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
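# Slow integration test: run the pretrained openai/shap-e pipeline end to end on GPU and compare against a stored reference.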
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
_lowerCamelCase : Union[str, Any] = ShapEPipeline.from_pretrained("openai/shap-e" )
_lowerCamelCase : Optional[Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(0 )
_lowerCamelCase : Tuple = pipe(
"a shark",generator=__A,guidance_scale=15.0,num_inference_steps=6_4,frame_size=6_4,output_type="np",).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__A,__A ) | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
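# Load a fairseq/metaseq OPT checkpoint, drop unused weights, rename keys and split the fused QKV projection for the HF model.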
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused QKV weight split as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
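# Command-line helper: google the query from argv (or a prompt), scrape the first result link and open it in the browser.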
if __name__ == "__main__":
UpperCAmelCase_ : int = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
UpperCAmelCase_ : str = f'''https://www.google.com/search?q={query}&num=100'''
UpperCAmelCase_ : int = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
UpperCAmelCase_ : str = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
UpperCAmelCase_ : Any = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link) | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
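# Rename GroupViT checkpoint parameters to the Hugging Face GroupViTModel naming scheme.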
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
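# Fetch the standard COCO test image used to sanity-check the converted model's logits.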
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
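# Dependency version table: package name -> pip version specifier.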
UpperCAmelCase_ : Any = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
} | 709 |
'''simple docstring'''
from __future__ import annotations
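# Bitonic sort: comp_and_swap orders one pair, bitonic_merge fixes up a bitonic run, and bitonic_sort recursively builds and merges runs (list length should be a power of two).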
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
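# Odd-even transposition (brick) sort: alternate compare-and-swap passes over even- and odd-indexed adjacent pairs.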
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = len(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 710 |
'''simple docstring'''
import math
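# Count partitions of the form (n**2 - 1) / 4 and return the first value at which the share of "perfect" partitions drops below max_proportion.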
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCAmelCase_ : Optional[Any] = 'src/transformers'
# Matches is_xxx_available()
UpperCAmelCase_ : int = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ : List[str] = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ : Dict = re.compile(R'\s+\"\S*\":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
UpperCAmelCase_ : List[str] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ : Dict = re.compile(R'^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ : int = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ : List[Any] = re.compile(R'^\s+\"([^\"]+)\",')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ : Dict = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ : Dict = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
UpperCAmelCase_ : str = re.compile(R'^\s*try:')
# Catches a line with else:
UpperCAmelCase_ : str = re.compile(R'^\s*else:')
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if _re_test_backend.search(__snake_case ) is None:
return None
_lowerCamelCase : Dict = [b[0] for b in _re_backend.findall(__snake_case )]
backends.sort()
return "_and_".join(__snake_case )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Optional[Any] = f.readlines()
_lowerCamelCase : Optional[Any] = 0
while line_index < len(__snake_case ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCamelCase : Union[str, Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_lowerCamelCase : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__snake_case ):
_lowerCamelCase : Optional[Any] = _re_one_line_import_struct.search(__snake_case ).groups()[0]
            _lowerCamelCase : Optional[Any] = re.findall(R"\[([^\]]+)\]" , __snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_lowerCamelCase : Optional[Any] = _re_import_struct_key_value.search(__snake_case )
if single_line_import_search is not None:
_lowerCamelCase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_lowerCamelCase : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCamelCase : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_lowerCamelCase : Tuple = lines[line_index]
if _re_import_struct_add_one.search(__snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(__snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(__snake_case ) is not None:
_lowerCamelCase : Union[str, Any] = _re_import_struct_add_many.search(__snake_case ).groups()[0].split(", " )
_lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif _re_between_brackets.search(__snake_case ) is not None:
_lowerCamelCase : str = _re_between_brackets.search(__snake_case ).groups()[0].split(", " )
_lowerCamelCase : Any = [obj[1:-1] for obj in imports if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif _re_quote_object.search(__snake_case ) is not None:
objects.append(_re_quote_object.search(__snake_case ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_lowerCamelCase : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCamelCase : Optional[Any] = []
while (
line_index < len(__snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_lowerCamelCase : Optional[int] = lines[line_index]
_lowerCamelCase : int = _re_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCamelCase : str = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCamelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_lowerCamelCase : Dict = lines[line_index]
_lowerCamelCase : Tuple = _re_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCamelCase : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
"""simple docstring"""
def find_duplicates(_lowerCAmelCase : List[Any] ):
return [k for k, v in collections.Counter(__snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCamelCase : Optional[int] = []
for key in import_dict_objects.keys():
_lowerCamelCase : List[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
_lowerCamelCase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCamelCase : Tuple = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
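# Walk the source tree and fail if any __init__.py defines different objects in its two halves.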
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
_lowerCamelCase : str = os.path.join(__snake_case , "__init__.py" )
_lowerCamelCase : Dict = parse_init(__snake_case )
if objects is not None:
_lowerCamelCase : List[Any] = analyze_results(*__snake_case )
if len(__snake_case ) > 0:
_lowerCamelCase : Optional[Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(__snake_case ) )
if len(__snake_case ) > 0:
raise ValueError("\n\n".join(__snake_case ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = []
for path, directories, files in os.walk(__snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__snake_case ) / folder).glob("*.py" ) ) ) == 0:
continue
_lowerCamelCase : Tuple = str((Path(__snake_case ) / folder).relative_to(__snake_case ) )
_lowerCamelCase : Tuple = short_path.replace(os.path.sep , "." )
submodules.append(__snake_case )
for fname in files:
if fname == "__init__.py":
continue
_lowerCamelCase : List[Any] = str((Path(__snake_case ) / fname).relative_to(__snake_case ) )
_lowerCamelCase : Union[str, Any] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__snake_case )
return submodules
UpperCAmelCase_ : Any = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = importlib.util.spec_from_file_location(
"transformers" , os.path.join(__snake_case , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowerCamelCase : Dict = spec.loader.load_module()
_lowerCamelCase : Tuple = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__snake_case ) > 0:
_lowerCamelCase : Any = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
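# Deprecated thin wrapper kept for backwards compatibility; it simply forwards to the standard Trainer.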
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase__ ( Protocol ):
def lowerCamelCase_ ( self : Union[str, Any],__A : float ):
return 0.0
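# Helpers below compute plot bounds and show a filter's frequency / phase response by FFT-ing its impulse response (gain in dB or unwrapped phase on a log frequency axis).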
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCamelCase : Tuple = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = 512
_lowerCamelCase : Optional[int] = [1] + [0] * (size - 1)
_lowerCamelCase : Tuple = [filter_type.process(lowerCamelCase_ ) for item in inputs]
_lowerCamelCase : Any = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCamelCase : Optional[Any] = np.abs(np.fft.fft(lowerCamelCase_ ) )
_lowerCamelCase : Union[str, Any] = 20 * np.logaa(lowerCamelCase_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_lowerCamelCase : Any = get_bounds(lowerCamelCase_ , lowerCamelCase_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(lowerCamelCase_ )
plt.show()
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = 512
_lowerCamelCase : Optional[Any] = [1] + [0] * (size - 1)
_lowerCamelCase : str = [filter_type.process(lowerCamelCase_ ) for item in inputs]
_lowerCamelCase : List[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCamelCase : Optional[Any] = np.angle(np.fft.fft(lowerCamelCase_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(lowerCamelCase_ , -2 * pi ) )
plt.show() | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
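# Fast (Rust tokenizers-backed) Blenderbot tokenizer; mirrors the RoBERTa byte-level setup and appends the EOS token when building model inputs.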
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase_ : Tuple = get_logger(__name__)
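# Archive extraction utilities: the manager caches extracted output under a hashed path and delegates to format-specific extractors detected via magic numbers.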
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : Optional[str] = None ):
_lowerCamelCase : List[Any] = (
os.path.join(UpperCamelCase_,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowerCamelCase : Tuple = Extractor
def lowerCamelCase_ ( self : List[str],__A : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
_lowerCamelCase : Any = os.path.abspath(UpperCamelCase_ )
return os.path.join(self.extract_dir,hash_url_to_filename(UpperCamelCase_ ) )
def lowerCamelCase_ ( self : str,__A : str,__A : bool ):
return force_extract or (
not os.path.isfile(UpperCamelCase_ ) and not (os.path.isdir(UpperCamelCase_ ) and os.listdir(UpperCamelCase_ ))
)
def lowerCamelCase_ ( self : str,__A : str,__A : bool = False ):
_lowerCamelCase : int = self.extractor.infer_extractor_format(UpperCamelCase_ )
if not extractor_format:
return input_path
_lowerCamelCase : Tuple = self._get_output_path(UpperCamelCase_ )
if self._do_extract(UpperCamelCase_,UpperCamelCase_ ):
self.extractor.extract(UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ )
return output_path
class UpperCAmelCase__ ( __lowerCamelCase ):
@classmethod
@abstractmethod
def lowerCamelCase_ ( cls : Optional[Any],__A : Union[Path, str],**__A : Optional[int] ):
...
@staticmethod
@abstractmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
...
class UpperCAmelCase__ ( __lowerCamelCase , __lowerCamelCase ):
lowerCAmelCase_ = []
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
with open(UpperCamelCase_,"rb" ) as f:
return f.read(UpperCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any,__A : Union[Path, str],__A : bytes = b"" ):
if not magic_number:
_lowerCamelCase : Optional[int] = max(len(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCamelCase : str = cls.read_magic_number(UpperCamelCase_,UpperCamelCase_ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( __lowerCamelCase ):
@classmethod
def lowerCamelCase_ ( cls : Optional[int],__A : Union[Path, str],**__A : int ):
return tarfile.is_tarfile(UpperCamelCase_ )
@staticmethod
def lowerCamelCase_ ( __A : Tuple,__A : str ):
def resolved(__A : str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase_ ) )
def badpath(__A : str,__A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase_,UpperCamelCase_ ) ).startswith(UpperCamelCase_ )
def badlink(__A : Optional[int],__A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCamelCase : Any = resolved(os.path.join(UpperCamelCase_,os.path.dirname(info.name ) ) )
return badpath(info.linkname,base=UpperCamelCase_ )
_lowerCamelCase : List[str] = resolved(UpperCamelCase_ )
for finfo in members:
if badpath(finfo.name,UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(UpperCamelCase_,UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(UpperCamelCase_,UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(UpperCamelCase_,exist_ok=UpperCamelCase_ )
_lowerCamelCase : Any = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_,members=TarExtractor.safemembers(UpperCamelCase_,UpperCamelCase_ ) )
tar_file.close()
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\x1F\x8B']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with gzip.open(UpperCamelCase_,"rb" ) as gzip_file:
with open(UpperCamelCase_,"wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_,UpperCamelCase_ )
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCamelCase_ ( cls : str,__A : Union[Path, str],__A : bytes = b"" ):
if super().is_extractable(UpperCamelCase_,magic_number=UpperCamelCase_ ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase_,"rb" ) as fp:
_lowerCamelCase : Tuple = _EndRecData(UpperCamelCase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCamelCase : Any = fp.read(UpperCamelCase_ ) # CD is where we expect it to be
if len(UpperCamelCase_ ) == sizeCentralDir:
_lowerCamelCase : int = struct.unpack(UpperCamelCase_,UpperCamelCase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(UpperCamelCase_,exist_ok=UpperCamelCase_ )
with zipfile.ZipFile(UpperCamelCase_,"r" ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with lzma.open(UpperCamelCase_ ) as compressed_file:
with open(UpperCamelCase_,"wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_,UpperCamelCase_ )
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase_,exist_ok=UpperCamelCase_ )
_lowerCamelCase : Optional[int] = rarfile.RarFile(UpperCamelCase_ )
rf.extractall(UpperCamelCase_ )
rf.close()
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_lowerCamelCase : Tuple = zstd.ZstdDecompressor()
with open(UpperCamelCase_,"rb" ) as ifh, open(UpperCamelCase_,"wb" ) as ofh:
dctx.copy_stream(UpperCamelCase_,UpperCamelCase_ )
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\x42\x5A\x68']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with bza.open(UpperCamelCase_,"rb" ) as compressed_file:
with open(UpperCamelCase_,"wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_,UpperCamelCase_ )
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(UpperCamelCase_,exist_ok=UpperCamelCase_ )
with pyazr.SevenZipFile(UpperCamelCase_,"r" ) as archive:
archive.extractall(UpperCamelCase_ )
class UpperCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase_ = [b'\x04\x22\x4D\x18']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(UpperCamelCase_,"rb" ) as compressed_file:
with open(UpperCamelCase_,"wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_,UpperCamelCase_ )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowerCAmelCase_ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] ):
return max(
len(UpperCamelCase_ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase_,UpperCamelCase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase_,magic_number_length=UpperCamelCase_ )
except OSError:
return b""
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : Union[Path, str],__A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",category=UpperCamelCase_,)
_lowerCamelCase : Tuple = cls.infer_extractor_format(UpperCamelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : Union[Path, str] ): # <Added version="2.4.0"/>
_lowerCamelCase : Optional[int] = cls._get_magic_number_max_length()
_lowerCamelCase : int = cls._read_magic_number(UpperCamelCase_,UpperCamelCase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase_,magic_number=UpperCamelCase_ ):
return extractor_format
@classmethod
def lowerCamelCase_ ( cls : Tuple,__A : Union[Path, str],__A : Union[Path, str],__A : Optional[str] = None,__A : Optional[BaseExtractor] = "deprecated",):
os.makedirs(os.path.dirname(UpperCamelCase_ ),exist_ok=UpperCamelCase_ )
# Prevent parallel extractions
_lowerCamelCase : str = str(Path(UpperCamelCase_ ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_,ignore_errors=UpperCamelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase_,UpperCamelCase_ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead.",category=UpperCamelCase_,)
_lowerCamelCase : List[str] = extractor if extractor != "deprecated" else extractor_format
else:
_lowerCamelCase : Dict = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase_,UpperCamelCase_ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0.",category=UpperCamelCase_,)
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase_ ):
return extractor.extract(UpperCamelCase_,UpperCamelCase_ )
| 713 |
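# A minimal, self-contained sketch of the magic-number dispatch implemented by the
# extractor registry above: read the leading bytes of a file and match them against
# known signatures to pick a decompressor. Only gzip, bz2, xz and zip are covered here,
# and names such as `sniff_format` are illustrative, not part of the original module.
import bz2
import gzip
import lzma
import shutil
import zipfile
from typing import Optional

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "bz2": [b"\x42\x5a\x68"],
    "xz": [b"\xfd\x37\x7a\x58\x5a\x00"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
}

def sniff_format(path: str) -> Optional[str]:
    # Read just enough bytes to test the longest known signature.
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        header = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(header.startswith(m) for m in magics):
            return fmt
    return None

def extract(path: str, output_path: str) -> None:
    fmt = sniff_format(path)
    if fmt == "zip":
        with zipfile.ZipFile(path) as zf:
            zf.extractall(output_path)
        return
    opener = {"gzip": gzip.open, "bz2": bz2.open, "xz": lzma.open}.get(fmt)
    if opener is None:
        raise ValueError(f"Unsupported or unrecognized archive: {path}")
    with opener(path, "rb") as src, open(output_path, "wb") as dst:
        shutil.copyfileobj(src, dst)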
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
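# A readable, runnable restatement of the bisection search above, assuming the same
# example equation (10 - x * x) and the same 0.01 tolerance; the names `f`, `lo`, `hi`
# and `mid` are illustrative.
def f(x: float) -> float:
    return 10 - x * x

def bisection(lo: float, hi: float, tol: float = 0.01) -> float:
    # A root can only be bracketed when f changes sign between the endpoints.
    if f(lo) * f(hi) >= 0:
        raise ValueError("Wrong space!")
    mid = lo
    while hi - lo >= tol:
        mid = (lo + hi) / 2
        if f(mid) == 0.0:
            break
        # Keep the half-interval that still contains the sign change.
        if f(mid) * f(lo) < 0:
            hi = mid
        else:
            lo = mid
    return mid

print(bisection(-2, 5))  # ~3.162, i.e. sqrt(10)
print(bisection(0, 6))   # ~3.162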
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( _UpperCamelCase ):
lowerCAmelCase_ = ['input_features']
def __init__( self : Dict,__A : int=8_0,__A : Union[str, Any]=1_6_0_0_0,__A : Optional[int]=1_6_0,__A : Optional[Any]=3_0,__A : Dict=4_0_0,__A : Optional[Any]=0.0,__A : int=False,**__A : Tuple,):
super().__init__(
feature_size=__A,sampling_rate=__A,padding_value=__A,return_attention_mask=__A,**__A,)
_lowerCamelCase : List[str] = n_fft
_lowerCamelCase : Any = hop_length
_lowerCamelCase : Dict = chunk_length
_lowerCamelCase : Union[str, Any] = chunk_length * sampling_rate
_lowerCamelCase : Any = self.n_samples // hop_length
_lowerCamelCase : List[Any] = sampling_rate
_lowerCamelCase : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2,num_mel_filters=__A,min_frequency=0.0,max_frequency=8_0_0_0.0,sampling_rate=__A,norm="slaney",mel_scale="slaney",)
def lowerCamelCase_ ( self : Optional[Any],__A : np.array ):
_lowerCamelCase : Tuple = spectrogram(
__A,window_function(self.n_fft,"hann" ),frame_length=self.n_fft,hop_length=self.hop_length,power=2.0,mel_filters=self.mel_filters,log_mel="log10",)
_lowerCamelCase : Tuple = log_spec[:, :-1]
_lowerCamelCase : Any = np.maximum(__A,log_spec.max() - 8.0 )
_lowerCamelCase : Optional[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase_ ( __A : List[np.ndarray],__A : List[np.ndarray],__A : float = 0.0 ):
if attention_mask is not None:
_lowerCamelCase : List[str] = np.array(__A,np.intaa )
_lowerCamelCase : List[str] = []
for vector, length in zip(__A,attention_mask.sum(-1 ) ):
_lowerCamelCase : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_lowerCamelCase : List[Any] = padding_value
normed_input_values.append(__A )
else:
_lowerCamelCase : Any = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : str,__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : bool = True,__A : Optional[int] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[bool] = None,__A : Optional[str] = "max_length",__A : Optional[int] = None,__A : Optional[int] = None,__A : Optional[bool] = None,**__A : List[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : str = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : Any = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : Union[str, Any] = [np.asarray([speech],dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Tuple = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Optional[Any] = [np.asarray([raw_speech] ).T]
_lowerCamelCase : List[str] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=max_length if max_length else self.n_samples,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=return_attention_mask or do_normalize,)
# zero-mean and unit-variance normalization
if do_normalize:
_lowerCamelCase : Dict = self.zero_mean_unit_var_norm(
padded_inputs["input_features"],attention_mask=padded_inputs["attention_mask"],padding_value=self.padding_value,)
_lowerCamelCase : Dict = np.stack(padded_inputs["input_features"],axis=0 )
# make sure list is in array format
_lowerCamelCase : Tuple = padded_inputs.get("input_features" ).transpose(2,0,1 )
_lowerCamelCase : str = [self._np_extract_fbank_features(__A ) for waveform in input_features[0]]
if isinstance(input_features[0],__A ):
_lowerCamelCase : Optional[int] = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
else:
_lowerCamelCase : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_lowerCamelCase : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_lowerCamelCase : str = padded_inputs.convert_to_tensors(__A )
return padded_inputs
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
_lowerCamelCase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 714 |
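# A standalone sketch of the attention-mask-aware zero-mean / unit-variance normalization
# used by the feature extractor above: each example is normalized over its valid
# (unpadded) length only, and the padded tail is reset to `padding_value`. Requires
# numpy; the function name is illustrative.
import numpy as np

def zero_mean_unit_var_norm(inputs, attention_mask, padding_value=0.0):
    normalized = []
    for vector, length in zip(inputs, attention_mask.sum(-1)):
        length = int(length)
        valid = vector[:length]
        normed = (vector - valid.mean()) / np.sqrt(valid.var() + 1e-7)
        if length < normed.shape[0]:
            normed[length:] = padding_value  # do not let padding leak statistics
        normalized.append(normed)
    return normalized

batch = np.array([[1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 7.0]])
mask = np.array([[1, 1, 1, 0], [1, 1, 1, 1]])
print(zero_mean_unit_var_norm(batch, mask))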
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
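# A tiny standalone sketch of the exact-deduplication step of the pipeline above: hash
# each record's content with whitespace stripped and keep only the first record seen for
# each digest. Standard library only; names are illustrative.
import hashlib
import re

_WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    return hashlib.md5(_WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

def drop_exact_duplicates(records):
    seen = set()
    unique = []
    for record in records:
        digest = content_hash(record["content"])
        if digest not in seen:
            seen.add(digest)
            unique.append(record)
    return unique

data = [
    {"content": "def f():\n    return 1"},
    {"content": "def f():  return 1"},  # same code, different whitespace
    {"content": "x = 2"},
]
print(len(drop_exact_duplicates(data)))  # 2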
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : int ):
_lowerCamelCase : Tuple = data
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Optional[Any] = None
def A_ ( _lowerCAmelCase : Optional[int] ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A_ ( ): # Main function for testing.
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Node(1 )
_lowerCamelCase : Optional[Any] = Node(2 )
_lowerCamelCase : List[str] = Node(3 )
_lowerCamelCase : Dict = Node(4 )
_lowerCamelCase : Optional[int] = Node(5 )
_lowerCamelCase : Tuple = Node(6 )
_lowerCamelCase : str = Node(7 )
_lowerCamelCase : Tuple = Node(8 )
_lowerCamelCase : List[Any] = Node(9 )
print(is_full_binary_tree(a__ ) )
print(depth_of_tree(a__ ) )
print("Tree is: " )
display(a__ )
if __name__ == "__main__":
main()
| 715 |
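# A compact, runnable restatement of the tree utilities above with a readable node type:
# `depth` is the longest root-to-leaf path and `is_full` requires every node to have
# either zero or two children. The names used here are illustrative.
from dataclasses import dataclass
from typing import Optional

@dataclass
class TreeNode:
    data: int
    left: Optional["TreeNode"] = None
    right: Optional["TreeNode"] = None

def depth(tree: Optional[TreeNode]) -> int:
    return 1 + max(depth(tree.left), depth(tree.right)) if tree else 0

def is_full(tree: Optional[TreeNode]) -> bool:
    if tree is None:
        return True
    if tree.left and tree.right:
        return is_full(tree.left) and is_full(tree.right)
    return not tree.left and not tree.right

root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
print(depth(root))    # 3
print(is_full(root))  # True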
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
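# A small sketch of the cache-or-compute pattern used by the dataset class above:
# expensive preprocessing runs once, the result is written to disk under a lock file so
# that only one process builds the cache, and later runs simply reload it. The helper
# name and the use of pickle (rather than torch.save) are illustrative simplifications;
# the `filelock` package is the same dependency the class above relies on.
import os
import pickle
from filelock import FileLock

def load_or_build(cache_path: str, build_fn, overwrite: bool = False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, "rb") as f:
                return pickle.load(f)
        result = build_fn()
        with open(cache_path, "wb") as f:
            pickle.dump(result, f)
        return result

# Usage (hypothetical): features = load_or_build("cached_train_features.pkl", lambda: build_features(examples))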
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class UpperCAmelCase__ ( __lowerCAmelCase ):
def __init__( self : Any,*__A : List[str],**__A : int ):
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead.",lowerCAmelCase_,)
super().__init__(*lowerCAmelCase_,**lowerCAmelCase_ ) | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
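# A minimal sketch of how the special-token layout and segment ids above are assembled:
# [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] with segment ids 0/1 for
# a pair. The token id values are made up for illustration.
CLS, SEP = 2, 3  # illustrative ids

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

print(build_inputs([10, 11], [20]))    # [2, 10, 11, 3, 20, 3]
print(token_type_ids([10, 11], [20]))  # [0, 0, 0, 0, 1, 1]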
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Any,__A : Optional[int]=1_3,__A : str=7,__A : Any=True,__A : Optional[Any]=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : List[Any]=9_9,__A : str=[1, 1, 2],__A : Tuple=1,__A : Tuple=3_2,__A : str=4,__A : Dict=8,__A : Any=3_7,__A : Dict="gelu_new",__A : Optional[int]=0.1,__A : int=0.1,__A : str=0.0,__A : Tuple=5_1_2,__A : List[Any]=3,__A : int=0.02,__A : List[str]=3,__A : Union[str, Any]=4,__A : Optional[int]=None,__A : Optional[Any]=False,):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Optional[int] = block_sizes
_lowerCamelCase : Optional[Any] = num_decoder_layers
_lowerCamelCase : Union[str, Any] = d_model
_lowerCamelCase : Tuple = n_head
_lowerCamelCase : Optional[Any] = d_head
_lowerCamelCase : Optional[int] = d_inner
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Dict = hidden_dropout
_lowerCamelCase : List[str] = attention_dropout
_lowerCamelCase : Dict = activation_dropout
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : Dict = scope
_lowerCamelCase : int = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCamelCase : Any = n_head
# Used in the tests to check the size of the first hidden state
_lowerCamelCase : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCamelCase : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCamelCase : Any = self.num_hidden_layers + 2
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_input_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : str = FunnelConfig(
vocab_size=self.vocab_size,block_sizes=self.block_sizes,num_decoder_layers=self.num_decoder_layers,d_model=self.d_model,n_head=self.n_head,d_head=self.d_head,d_inner=self.d_inner,hidden_act=self.hidden_act,hidden_dropout=self.hidden_dropout,attention_dropout=self.attention_dropout,activation_dropout=self.activation_dropout,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_std=self.initializer_std,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : List[str],__A : Union[str, Any],__A : Any,__A : Tuple,__A : List[str],__A : List[str],__A : Optional[Any],__A : Optional[Any],):
_lowerCamelCase : Optional[int] = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(UpperCAmelCase__ )
_lowerCamelCase : Tuple = [input_ids, input_mask]
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : str = False
_lowerCamelCase : Union[str, Any] = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],__A : List[str],__A : List[Any],__A : Optional[int],__A : Union[str, Any],__A : Optional[Any],__A : str,):
_lowerCamelCase : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Optional[int] = model(UpperCAmelCase__ )
_lowerCamelCase : Optional[int] = [input_ids, input_mask]
_lowerCamelCase : List[str] = model(UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
_lowerCamelCase : List[str] = False
_lowerCamelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 3, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : int = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
def lowerCamelCase_ ( self : List[str],__A : Any,__A : Union[str, Any],__A : Any,__A : Dict,__A : List[str],__A : int,__A : Dict,):
_lowerCamelCase : Dict = TFFunnelForPreTraining(config=UpperCAmelCase__ )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : int,__A : Dict,__A : List[str],__A : List[str],__A : Optional[int],__A : int,):
_lowerCamelCase : List[str] = TFFunnelForMaskedLM(config=UpperCAmelCase__ )
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Tuple,__A : Dict,__A : Union[str, Any],__A : List[str],__A : List[str],__A : Union[str, Any],__A : str,__A : Any,):
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : Dict = TFFunnelForSequenceClassification(config=UpperCAmelCase__ )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[str] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Union[str, Any],__A : Any,__A : int,__A : List[str],__A : List[Any],__A : str,):
_lowerCamelCase : Dict = self.num_choices
_lowerCamelCase : Tuple = TFFunnelForMultipleChoice(config=UpperCAmelCase__ )
_lowerCamelCase : str = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : Union[str, Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : List[str],__A : Optional[Any],__A : str,__A : int,__A : Any,__A : Optional[Any],):
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : str = TFFunnelForTokenClassification(config=UpperCAmelCase__ )
_lowerCamelCase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : List[Any],__A : Union[str, Any],__A : Optional[Any],__A : Dict,__A : List[str],__A : Optional[int],):
_lowerCamelCase : Optional[int] = TFFunnelForQuestionAnswering(config=UpperCAmelCase__ )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : List[Any] = config_and_inputs
_lowerCamelCase : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[int] = TFFunnelModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self,config_class=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@require_tf
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = TFFunnelModelTester(self,base=UpperCAmelCase__ )
_lowerCamelCase : Any = ConfigTester(self,config_class=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : str = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
from collections import deque
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = len(__a )
_lowerCamelCase : Dict = deque()
_lowerCamelCase : Union[str, Any] = [False for _ in range(__a )]
_lowerCamelCase : Dict = [-1 for _ in range(__a )]
_lowerCamelCase : Dict = index_of[:]
def strong_connect(_lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ):
_lowerCamelCase : Tuple = index # the number when this node is seen
_lowerCamelCase : Union[str, Any] = index # lowest rank node reachable from here
index += 1
stack.append(__a )
_lowerCamelCase : Optional[Any] = True
for w in g[v]:
if index_of[w] == -1:
_lowerCamelCase : List[str] = strong_connect(__a , __a , __a )
_lowerCamelCase : Optional[int] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
_lowerCamelCase : int = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Optional[Any] = stack.pop()
_lowerCamelCase : str = False
component.append(__a )
while w != v:
_lowerCamelCase : Any = stack.pop()
_lowerCamelCase : Any = False
component.append(__a )
components.append(__a )
return index
_lowerCamelCase : Tuple = []
for v in range(__a ):
if index_of[v] == -1:
strong_connect(__a , 0 , __a )
return components
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = [[] for _ in range(__a )]
for u, v in edges:
g[u].append(__a )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ : Dict = 7
UpperCAmelCase_ : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ : Tuple = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ : Dict = [(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ : Dict = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ : Tuple = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
from collections import Counter
from timeit import timeit
def A_ ( _lowerCAmelCase : str = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def A_ ( _lowerCAmelCase : str = "" ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return True
_lowerCamelCase : Union[str, Any] = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCamelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCamelCase : Dict = character_freq_dict.get(_lowerCAmelCase , 0 ) + 1
_lowerCamelCase : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A_ ( _lowerCAmelCase : str = "" ):
"""simple docstring"""
print("\nFor string = " , _lowerCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
UpperCAmelCase_ : List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''') | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCAmelCase_ : int = False
class UpperCAmelCase__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[Any] ):
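        # Save/reload round trip: generate once, serialize the pipeline to a temporary directory, reload it, and check the regenerated images match.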
_lowerCamelCase : str = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowerCamelCase : int = "A painting of a squirrel eating a burger "
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
prompt=_lowerCamelCase,generator=_lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCamelCase )
_lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained(_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = generator.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=_lowerCamelCase,generator=_lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Any = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion",torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = "A painting of a squirrel eating a burger "
_lowerCamelCase : Tuple = torch.manual_seed(0 )
_lowerCamelCase : str = pipe(
prompt=_lowerCamelCase,generator=_lowerCamelCase,guidance_scale=7.5,num_inference_steps=5_0,output_type="numpy" ).images
_lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Dict = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = u
for i in range(1 , lowerCamelCase_ ):
_lowerCamelCase : Optional[Any] = temp * (u - i)
return temp
def A_ ( ):
"""simple docstring"""
    _lowerCamelCase : Optional[int] = int(input("enter the number of values: " ) )
_lowerCamelCase : list[list[float]] = []
for _ in range(lowerCamelCase_ ):
y.append([] )
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
y[i].append(lowerCamelCase_ )
_lowerCamelCase : int = 0
print("enter the values of parameters in a list: " )
_lowerCamelCase : Optional[int] = list(map(lowerCamelCase_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(lowerCamelCase_ ):
_lowerCamelCase : List[str] = float(input() )
_lowerCamelCase : Union[str, Any] = int(input("enter the value to interpolate: " ) )
_lowerCamelCase : Optional[int] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCamelCase_ ):
for j in range(n - i ):
_lowerCamelCase : List[Any] = y[j + 1][i - 1] - y[j][i - 1]
_lowerCamelCase : str = y[0][0]
for i in range(1 , lowerCamelCase_ ):
summ += (ucal(lowerCamelCase_ , lowerCamelCase_ ) * y[0][i]) / math.factorial(lowerCamelCase_ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main() | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = len(UpperCamelCase__ )
for i in range(1 , UpperCamelCase__ ):
_lowerCamelCase : List[Any] = collection[i]
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Optional[Any] = i - 1
while low <= high:
_lowerCamelCase : Optional[Any] = (low + high) // 2
if val < collection[mid]:
_lowerCamelCase : Optional[int] = mid - 1
else:
_lowerCamelCase : List[Any] = mid + 1
for j in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
_lowerCamelCase : Any = collection[j - 1]
_lowerCamelCase : str = val
return collection
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted)) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
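        # Sample initial latents with the requested shape (or validate user-provided ones) and scale them by the scheduler's initial noise sigma.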
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
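        # Encode the conditioning image(s) with CLIP, keep the patch embeddings, repeat them per requested image, and prepend zero embeddings when classifier-free guidance is enabled.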
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ):
lowerCAmelCase_ = GPTaTokenizer
lowerCAmelCase_ = GPTaTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : List[str] = dict(zip(lowercase_,range(len(lowercase_ ) ) ) )
_lowerCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Any = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def lowerCamelCase_ ( self : Dict,**__A : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname,**lowercase_ )
def lowerCamelCase_ ( self : Any,**__A : Any ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname,**lowercase_ )
def lowerCamelCase_ ( self : Dict,__A : List[Any] ):
_lowerCamelCase : int = "lower newer"
_lowerCamelCase : Any = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Tuple = GPTaTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : List[Any] = "lower newer"
_lowerCamelCase : str = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : str = tokenizer.tokenize(lowercase_,add_prefix_space=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
_lowerCamelCase : int = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),lowercase_ )
def lowerCamelCase_ ( self : List[Any] ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[str] = tokenizer.tokenize(lowercase_,add_prefix_space=lowercase_ )
_lowerCamelCase : int = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
# Testing conversion to ids without special tokens
_lowerCamelCase : int = tokenizer.encode(lowercase_,add_special_tokens=lowercase_,add_prefix_space=lowercase_ )
_lowerCamelCase : int = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
# Testing conversion to ids with special tokens
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
_lowerCamelCase : str = tokenizer.encode(lowercase_,add_prefix_space=lowercase_ )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
# Testing the unknown token
_lowerCamelCase : int = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[int] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ),lowercase_ )
def lowerCamelCase_ ( self : List[str],*__A : Union[str, Any],**__A : Union[str, Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : Tuple,__A : List[str]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
# Simple input
_lowerCamelCase : int = "This is a simple input"
_lowerCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : str = ("This is a simple input", "This is a pair")
_lowerCamelCase : Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding="max_length" )
# Simple input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding="max_length" )
# Simple input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding="max_length",)
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding="max_length" )
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding="max_length" )
# Pair input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding="max_length",)
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = GPTaTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Optional[Any] = ("This is a simple input", "This is a pair")
_lowerCamelCase : int = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Optional[int] = tokenizer.pad_token_id
_lowerCamelCase : Optional[int] = tokenizer(lowercase_,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : Optional[int] = tokenizer(lowercase_,padding=lowercase_,truncate=lowercase_,return_tensors="np" )
_lowerCamelCase : List[str] = tokenizer(*lowercase_,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(lowercase_,padding=lowercase_,truncate=lowercase_,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = "$$$"
_lowerCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname,bos_token=lowercase_,add_bos_token=lowercase_ )
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = tokenizer.bos_token_id
_lowerCamelCase : Optional[int] = tokenizer(lowercase_ )
_lowerCamelCase : Optional[int] = tokenizer(lowercase_ )
self.assertEqual(out_s.input_ids[0],lowercase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],lowercase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : Optional[Any] ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowerCamelCase : Dict = [self.get_tokenizer(do_lower_case=lowercase_,add_bos_token=lowercase_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_lowerCamelCase : Dict = "Encode this."
_lowerCamelCase : Dict = "This one too please."
_lowerCamelCase : str = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
encoded_sequence += tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] = tokenizer.encode_plus(
lowercase_,lowercase_,add_special_tokens=lowercase_,return_special_tokens_mask=lowercase_,)
_lowerCamelCase : List[str] = encoded_sequence_dict["input_ids"]
_lowerCamelCase : Tuple = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(lowercase_ ),len(lowercase_ ) )
_lowerCamelCase : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase_ )
]
_lowerCamelCase : Tuple = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase_,lowercase_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/opt-350m",from_slow=lowercase_ )
_lowerCamelCase : str = "A photo of a cat"
_lowerCamelCase : int = tokenizer.encode(
lowercase_,)
self.assertEqual(lowercase_,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("./test_opt" )
_lowerCamelCase : int = tokenizer.encode(
lowercase_,)
self.assertEqual(lowercase_,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("facebook/opt-350m",use_slow=lowercase_ )
_lowerCamelCase : List[str] = "A photo of a cat"
_lowerCamelCase : Dict = tokenizer.encode(
lowercase_,)
# Same as above
self.assertEqual(lowercase_,[2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("facebook/opt-350m",from_slow=lowercase_ )
_lowerCamelCase : int = "bos"
_lowerCamelCase : int = tokenizer.get_vocab()["bos"]
_lowerCamelCase : List[Any] = "A photo of a cat"
_lowerCamelCase : Optional[int] = tokenizer.encode(
lowercase_,)
# We changed the bos token
self.assertEqual(lowercase_,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_lowerCamelCase : Any = tokenizer.encode(
lowercase_,)
self.assertEqual(lowercase_,[3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BigBirdConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
_lowerCamelCase : str = BigBirdForQuestionAnswering(_lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = BigBirdForPreTraining(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_lowerCAmelCase , _lowerCAmelCase , is_trivia_qa=_lowerCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
UpperCAmelCase_ : Tuple = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCAmelCase_ : Tuple = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCAmelCase_ : List[str] = [2, 4, 1, 5]
UpperCAmelCase_ : Tuple = len(train_data)
UpperCAmelCase_ : Optional[Any] = 0.009
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any="train" ):
"""simple docstring"""
return calculate_hypothesis_value(_lowerCamelCase , _lowerCamelCase ) - output(
_lowerCamelCase , _lowerCamelCase )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=m ):
"""simple docstring"""
_lowerCamelCase : int = 0
for i in range(_lowerCamelCase ):
if index == -1:
summation_value += _error(_lowerCamelCase )
else:
summation_value += _error(_lowerCamelCase ) * train_data[i][0][index]
return summation_value
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = summation_of_cost_derivative(_lowerCamelCase , _lowerCamelCase ) / m
return cost_derivative_value
def A_ ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_lowerCamelCase : List[str] = 0.0_0_0_0_0_2
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = 0
while True:
j += 1
_lowerCamelCase : int = [0, 0, 0, 0]
for i in range(0 , len(_lowerCamelCase ) ):
_lowerCamelCase : Union[str, Any] = get_cost_derivative(i - 1 )
_lowerCamelCase : Any = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase , rtol=_lowerCamelCase , ):
break
_lowerCamelCase : Any = temp_parameter_vector
print(("Number of iterations:", j) )
def A_ ( ):
"""simple docstring"""
for i in range(len(_lowerCamelCase ) ):
print(("Actual output value:", output(_lowerCamelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(_lowerCamelCase , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
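# --- Hedged reference sketch (not part of the original script) ---------------
# The loop above implements batch gradient descent for a linear hypothesis
# h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3; the same
# simultaneous update, theta := theta - lr * (1/m) * X^T (X @ theta - y),
# can be written in a few vectorized NumPy lines. The training pairs below
# restate the constants at the top of the script; the function name, step
# count and everything else here are illustrative choices only.
import numpy as np
def _vectorized_gradient_descent(steps: int = 50_000, lr: float = 0.009) -> np.ndarray:
    pairs = (((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41))
    x = np.array([[1.0, *features] for features, _ in pairs])  # prepend a bias column
    y = np.array([float(target) for _, target in pairs])
    theta = np.zeros(x.shape[1])
    for _ in range(steps):
        gradient = x.T @ (x @ theta - y) / len(y)  # (1/m) * X^T (X theta - y)
        theta -= lr * gradient
    return theta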
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
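# --- Hedged usage sketch (not part of the original file) ---------------------
# The class above mirrors transformers' BlenderbotSmallTokenizerFast, so a
# minimal round trip with the published API would look roughly like this.
# The checkpoint name comes from the URL map above; the exact tokenization
# output is illustrative, not guaranteed.
from transformers import BlenderbotSmallTokenizerFast
def _demo_blenderbot_small_tokenizer() -> None:
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    encoded = tokenizer("hello world", return_tensors="pt")
    # decode back to text to confirm the round trip
    print(tokenizer.batch_decode(encoded["input_ids"], skip_special_tokens=True))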
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any],__A : Tuple,__A : Union[str, Any]=7,__A : Optional[int]=3,__A : str=3_0,__A : List[str]=4_0_0,__A : Optional[Any]=True,__A : str=None,__A : Tuple=0.9,__A : Union[str, Any]=None,__A : int=True,__A : List[Any]=[0.5, 0.5, 0.5],__A : str=[0.5, 0.5, 0.5],):
_lowerCamelCase : List[str] = size if size is not None else {"shortest_edge": 3_0}
_lowerCamelCase : int = crop_size if crop_size is not None else {"height": 3_0, "width": 3_0}
_lowerCamelCase : Any = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Optional[Any] = min_resolution
_lowerCamelCase : Optional[Any] = max_resolution
_lowerCamelCase : List[str] = do_resize_and_center_crop
_lowerCamelCase : Dict = size
_lowerCamelCase : List[str] = crop_pct
_lowerCamelCase : Union[str, Any] = crop_size
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Tuple = image_mean
_lowerCamelCase : str = image_std
def lowerCamelCase_ ( self : Optional[Any] ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = PoolFormerImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = PoolFormerImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(__UpperCamelCase,"size" ) )
self.assertTrue(hasattr(__UpperCamelCase,"crop_pct" ) )
self.assertTrue(hasattr(__UpperCamelCase,"do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase,"image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase,"image_std" ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 3_0} )
self.assertEqual(image_processor.crop_size,{"height": 3_0, "width": 3_0} )
_lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2,crop_size=8_4 )
self.assertEqual(image_processor.size,{"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size,{"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self : Dict ):
pass
def lowerCamelCase_ ( self : List[Any] ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester,equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase,Image.Image )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : List[str] = image_processing(__UpperCamelCase,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__UpperCamelCase,numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase,np.ndarray )
# Test not batched input
_lowerCamelCase : Dict = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Any = image_processing(__UpperCamelCase,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : List[Any] ):
# Initialize image_processing
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester,equal_resolution=__UpperCamelCase,torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase,torch.Tensor )
# Test not batched input
_lowerCamelCase : Any = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : List[str] = image_processing(__UpperCamelCase,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
            ),)
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = Dict[str, Any]
UpperCAmelCase_ : List[Any] = List[Prediction]
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
def __init__( self : int,*__A : List[Any],**__A : Optional[int] ):
super().__init__(*__A,**__A )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self,"vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCamelCase_ ( self : List[str],**__A : Tuple ):
_lowerCamelCase : Optional[int] = {}
if "threshold" in kwargs:
_lowerCamelCase : Optional[int] = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : Tuple,*__A : Dict,**__A : Union[str, Any] ):
return super().__call__(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : Any ):
_lowerCamelCase : List[Any] = load_image(__A )
_lowerCamelCase : Dict = torch.IntTensor([[image.height, image.width]] )
_lowerCamelCase : Dict = self.image_processor(images=[image],return_tensors="pt" )
if self.tokenizer is not None:
_lowerCamelCase : Optional[Any] = self.tokenizer(text=inputs["words"],boxes=inputs["boxes"],return_tensors="pt" )
_lowerCamelCase : Tuple = target_size
return inputs
def lowerCamelCase_ ( self : List[str],__A : List[str] ):
_lowerCamelCase : Tuple = model_inputs.pop("target_size" )
_lowerCamelCase : Tuple = self.model(**__A )
_lowerCamelCase : Optional[Any] = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_lowerCamelCase : List[str] = model_inputs["""bbox"""]
return model_outputs
def lowerCamelCase_ ( self : Tuple,__A : Optional[int],__A : int=0.9 ):
_lowerCamelCase : Optional[int] = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_lowerCamelCase : List[str] = target_size[0].tolist()
def unnormalize(__A : List[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
_lowerCamelCase : Tuple = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_lowerCamelCase : Optional[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_lowerCamelCase : str = [unnormalize(__A ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
_lowerCamelCase : str = ["""score""", """label""", """box"""]
_lowerCamelCase : Optional[int] = [dict(zip(__A,__A ) ) for vals in zip(scores.tolist(),__A,__A ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_lowerCamelCase : Optional[int] = self.image_processor.post_process_object_detection(__A,__A,__A )
_lowerCamelCase : Tuple = raw_annotations[0]
_lowerCamelCase : Tuple = raw_annotation["""scores"""]
_lowerCamelCase : Any = raw_annotation["""labels"""]
_lowerCamelCase : List[Any] = raw_annotation["""boxes"""]
_lowerCamelCase : List[Any] = scores.tolist()
_lowerCamelCase : Union[str, Any] = [self.model.config.idalabel[label.item()] for label in labels]
_lowerCamelCase : Optional[int] = [self._get_bounding_box(__A ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_lowerCamelCase : Tuple = ["""score""", """label""", """box"""]
_lowerCamelCase : Optional[int] = [
dict(zip(__A,__A ) )
for vals in zip(raw_annotation["scores"],raw_annotation["labels"],raw_annotation["boxes"] )
]
return annotation
def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any] ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_lowerCamelCase : Tuple = box.int().tolist()
_lowerCamelCase : Union[str, Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
        return bbox
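# --- Hedged usage sketch (not part of the original file) ---------------------
# The class above mirrors transformers' ObjectDetectionPipeline; the usual
# entry point is the pipeline() factory. The checkpoint and image URL below
# are illustrative choices, not taken from the file above.
from transformers import pipeline
def _demo_object_detection() -> None:
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    for prediction in predictions:
        # each entry is a dict with "score", "label" and "box" keys, matching
        # the postprocess output built above
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])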
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated in K, V, Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return "".join(sorted(lowerCAmelCase__ ) )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return word_by_signature[signature(lowerCAmelCase__ )]
UpperCAmelCase_ : Optional[int] = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
UpperCAmelCase_ : Tuple = sorted({word.strip().lower() for word in data.splitlines()})
UpperCAmelCase_ : Any = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
UpperCAmelCase_ : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Any = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
lowerCAmelCase_ = PegasusConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = "gelu"
def __init__( self : Optional[int],__A : Union[str, Any],__A : List[str]=1_3,__A : Any=7,__A : Optional[Any]=True,__A : Any=False,__A : str=9_9,__A : Optional[Any]=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[int]=0.1,__A : Tuple=0.1,__A : Optional[int]=2_0,__A : Optional[int]=2,__A : Union[str, Any]=1,__A : str=0,):
_lowerCamelCase : str = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : Dict = eos_token_id
_lowerCamelCase : Optional[int] = pad_token_id
_lowerCamelCase : Optional[Any] = bos_token_id
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size ).clip(3,self.vocab_size )
_lowerCamelCase : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ),1 )
_lowerCamelCase : Tuple = np.concatenate([input_ids, eos_tensor],axis=1 )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : int = self.config_cls(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,**self.config_updates,)
_lowerCamelCase : Any = prepare_pegasus_inputs_dict(lowerCamelCase_,lowerCamelCase_,lowerCamelCase_ )
return config, inputs_dict
def lowerCamelCase_ ( self : List[Any],__A : Any,__A : Optional[int],__A : Union[str, Any] ):
_lowerCamelCase : int = 2_0
_lowerCamelCase : Optional[Any] = model_class_name(lowerCamelCase_ )
_lowerCamelCase : List[str] = model.encode(inputs_dict["input_ids"] )
_lowerCamelCase , _lowerCamelCase : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowerCamelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0],lowerCamelCase_,lowerCamelCase_ )
_lowerCamelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length),dtype="i4" )
_lowerCamelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
_lowerCamelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1],lowerCamelCase_,decoder_attention_mask=lowerCamelCase_,past_key_values=lowerCamelCase_,decoder_position_ids=lowerCamelCase_,)
_lowerCamelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
_lowerCamelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:],lowerCamelCase_,decoder_attention_mask=lowerCamelCase_,past_key_values=outputs_cache.past_key_values,decoder_position_ids=lowerCamelCase_,)
_lowerCamelCase : str = model.decode(lowerCamelCase_,lowerCamelCase_ )
_lowerCamelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3,msg=f'Max diff is {diff}' )
def lowerCamelCase_ ( self : List[Any],__A : Tuple,__A : Union[str, Any],__A : Optional[Any] ):
_lowerCamelCase : Optional[Any] = 2_0
_lowerCamelCase : Optional[int] = model_class_name(lowerCamelCase_ )
_lowerCamelCase : str = model.encode(inputs_dict["input_ids"] )
_lowerCamelCase , _lowerCamelCase : Dict = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowerCamelCase : List[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
],axis=-1,)
_lowerCamelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0],lowerCamelCase_,lowerCamelCase_ )
_lowerCamelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
_lowerCamelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1],lowerCamelCase_,decoder_attention_mask=lowerCamelCase_,past_key_values=lowerCamelCase_,decoder_position_ids=lowerCamelCase_,)
_lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
_lowerCamelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:],lowerCamelCase_,past_key_values=outputs_cache.past_key_values,decoder_attention_mask=lowerCamelCase_,decoder_position_ids=lowerCamelCase_,)
_lowerCamelCase : Dict = model.decode(lowerCamelCase_,lowerCamelCase_,decoder_attention_mask=lowerCamelCase_ )
_lowerCamelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3,msg=f'Max diff is {diff}' )
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCamelCase : Tuple = np.not_equal(_lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Optional[int] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase__ ( a__ , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : str = FlaxPegasusModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self,config_class=lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase_,lowerCamelCase_,lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase_,lowerCamelCase_,lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : Optional[Any] = self._prepare_for_class(lowerCamelCase_,lowerCamelCase_ )
_lowerCamelCase : Union[str, Any] = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(__A : str,__A : Any=None,**__A : List[Any] ):
return model.encode(input_ids=lowerCamelCase_,attention_mask=lowerCamelCase_ )
with self.subTest("JIT Enabled" ):
_lowerCamelCase : Tuple = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowerCamelCase : Any = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ),len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape,output.shape )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : Optional[int] = model_class(lowerCamelCase_ )
_lowerCamelCase : Optional[int] = model.encode(inputs_dict["input_ids"],inputs_dict["attention_mask"] )
_lowerCamelCase : Dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__A : Union[str, Any],__A : List[Any],__A : List[str] ):
return model.decode(
decoder_input_ids=lowerCamelCase_,decoder_attention_mask=lowerCamelCase_,encoder_outputs=lowerCamelCase_,)
with self.subTest("JIT Enabled" ):
_lowerCamelCase : int = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowerCamelCase : Dict = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ),len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape,output.shape )
@slow
def lowerCamelCase_ ( self : str ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class_name.from_pretrained("google/pegasus-large",from_pt=lowerCamelCase_ )
_lowerCamelCase : Optional[Any] = np.ones((1, 1) )
_lowerCamelCase : str = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_lowerCamelCase : List[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_lowerCamelCase : Dict = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_lowerCamelCase : List[str] = [
"California\'s largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.",
]
_lowerCamelCase : List[str] = tokenizer(lowerCamelCase_,return_tensors="np",truncation=lowerCamelCase_,max_length=5_1_2,padding=lowerCamelCase_ )
_lowerCamelCase : Union[str, Any] = model.generate(**lowerCamelCase_,num_beams=2 ).sequences
_lowerCamelCase : Optional[int] = tokenizer.batch_decode(lowerCamelCase_,skip_special_tokens=lowerCamelCase_ )
        assert tgt_text == decoded
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
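# --- Hedged usage note (not part of the original script) ---------------------
# Classic bitonic sort only produces a correct ordering when the input length
# is a power of two, which the interactive driver above does not check. A
# small self-test under that assumption, using the same function names the
# driver above calls:
def _check_bitonic_power_of_two() -> None:
    data = [7, 3, 1, 8, 2, 6, 5, 4]  # length 8, a power of two
    assert len(data) & (len(data) - 1) == 0, "bitonic sort needs a power-of-two length"
    bitonic_sort(data, 0, len(data), 1)  # direction 1 -> ascending
    assert data == sorted(data)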
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowerCamelCase_ ( self : Optional[Any] ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ),supervised_keys=UpperCamelCase__,)
def lowerCamelCase_ ( self : Dict,__A : Optional[int],__A : Optional[Any] ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_dummy_examples()} )]
def lowerCamelCase_ ( self : Optional[Any],__A : int,__A : Dict ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCamelCase__ )
class UpperCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowerCamelCase_ ( self : Dict ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ),supervised_keys=UpperCamelCase__,)
def lowerCamelCase_ ( self : str,__A : List[Any],__A : Dict ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_nested_examples()} )
]
def lowerCamelCase_ ( self : Union[str, Any],__A : Tuple,__A : str ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(UpperCamelCase__ )
def A_ ( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def A_ ( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class UpperCAmelCase__ ( lowercase_ ):
@require_beam
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Tuple = DummyBeamDataset(cache_dir=UpperCamelCase__,beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
_lowerCamelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples,UpperCamelCase__ )
self.assertDictEqual(dset["train"][0],get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1],get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self : Dict ):
import apache_beam as beam
_lowerCamelCase : Any = beam.io.parquetio.WriteToParquet
_lowerCamelCase : str = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Dict = DummyBeamDataset(cache_dir=UpperCamelCase__,beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
_lowerCamelCase : List[str] = partial(UpperCamelCase__,num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
UpperCamelCase__,builder.name,"default","0.0.0",f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
UpperCamelCase__,builder.name,"default","0.0.0",f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
_lowerCamelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples,UpperCamelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ),sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : List[Any] = DummyBeamDataset(cache_dir=UpperCamelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions,builder.download_and_prepare )
@require_beam
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Optional[Any] = NestedBeamDataset(cache_dir=UpperCamelCase__,beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features,datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
_lowerCamelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,UpperCamelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples,UpperCamelCase__ )
self.assertDictEqual(dset["train"][0],get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1],get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
    print(f'''{solution() = }''')
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class UpperCAmelCase__ ( __UpperCAmelCase ):
def __init__( self : Dict,**__A : Any ):
super().__init__(**lowerCAmelCase_ )
requires_backends(self,"vision" )
requires_backends(self,"torch" )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowerCAmelCase_ )
def lowerCamelCase_ ( self : str,**__A : str ):
_lowerCamelCase : int = {}
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Dict = {}
# preprocess args
if "points_per_batch" in kwargs:
_lowerCamelCase : Union[str, Any] = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
_lowerCamelCase : int = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
_lowerCamelCase : Union[str, Any] = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
_lowerCamelCase : Tuple = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
_lowerCamelCase : str = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
_lowerCamelCase : Any = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
_lowerCamelCase : str = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
_lowerCamelCase : Tuple = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
_lowerCamelCase : List[str] = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
_lowerCamelCase : List[str] = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
_lowerCamelCase : Optional[int] = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
_lowerCamelCase : Optional[int] = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any],__A : List[str],*__A : Any,__A : Optional[int]=None,__A : List[str]=None,**__A : str ):
return super().__call__(lowerCAmelCase_,*lowerCAmelCase_,num_workers=lowerCAmelCase_,batch_size=lowerCAmelCase_,**lowerCAmelCase_ )
def lowerCamelCase_ ( self : Dict,__A : Optional[Any],__A : Optional[Any]=6_4,__A : int = 0,__A : float = 5_1_2 / 1_5_0_0,__A : Optional[int] = 3_2,__A : Optional[int] = 1,):
_lowerCamelCase : Optional[int] = load_image(lowerCAmelCase_ )
_lowerCamelCase : Dict = self.image_processor.size["longest_edge"]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = self.image_processor.generate_crop_boxes(
lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_ )
_lowerCamelCase : Union[str, Any] = self.image_processor(images=lowerCAmelCase_,return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
_lowerCamelCase : str = self.get_inference_context()
with inference_context():
_lowerCamelCase : Any = self._ensure_tensor_on_device(lowerCAmelCase_,device=self.device )
_lowerCamelCase : Optional[Any] = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
_lowerCamelCase : List[Any] = image_embeddings
_lowerCamelCase : List[Any] = grid_points.shape[1]
_lowerCamelCase : int = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0,lowerCAmelCase_,lowerCAmelCase_ ):
_lowerCamelCase : int = grid_points[:, i : i + points_per_batch, :, :]
_lowerCamelCase : Union[str, Any] = input_labels[:, i : i + points_per_batch]
_lowerCamelCase : Union[str, Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCamelCase_ ( self : Any,__A : List[Any],__A : Any=0.88,__A : Tuple=0.95,__A : str=0,__A : Dict=1,):
_lowerCamelCase : Union[str, Any] = model_inputs.pop("input_boxes" )
_lowerCamelCase : Dict = model_inputs.pop("is_last" )
_lowerCamelCase : int = model_inputs.pop("original_sizes" ).tolist()
_lowerCamelCase : str = model_inputs.pop("reshaped_input_sizes" ).tolist()
_lowerCamelCase : int = self.model(**lowerCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_lowerCamelCase : Union[str, Any] = model_outputs["pred_masks"]
_lowerCamelCase : Any = self.image_processor.post_process_masks(
lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,binarize=lowerCAmelCase_ )
_lowerCamelCase : List[Any] = model_outputs["iou_scores"]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.image_processor.filter_masks(
masks[0],iou_scores[0],original_sizes[0],input_boxes[0],lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCamelCase_ ( self : List[Any],__A : Optional[int],__A : Any=False,__A : List[Any]=False,__A : Tuple=0.7,):
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Tuple = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
_lowerCamelCase : Dict = torch.cat(lowerCAmelCase_ )
_lowerCamelCase : Tuple = torch.cat(lowerCAmelCase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processor.post_process_for_mask_generation(
lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_,lowerCAmelCase_ )
_lowerCamelCase : Tuple = defaultdict(lowerCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase_ )
_lowerCamelCase : List[Any] = {}
if output_rle_mask:
_lowerCamelCase : Optional[int] = rle_mask
if output_bboxes_mask:
_lowerCamelCase : int = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
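# Hedged sketch: the deprecation warning above points at the plain `Trainer`; a minimal drop-in
# construction looks like the following, where the output directory is an illustrative placeholder.
from transformers import Trainer, TrainingArguments
def build_plain_trainer(model, train_dataset, eval_dataset=None):
    training_args = TrainingArguments(output_dir="./outputs")
    return Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset)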
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Any = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_lowerCamelCase : int = True if '''large''' in model_name or '''huge''' in model_name else False
_lowerCamelCase : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
_lowerCamelCase : List[str] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowerCamelCase : int = [3, 3, 3, 3]
_lowerCamelCase : str = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowerCamelCase : List[str] = [4, 4, 4, 4]
_lowerCamelCase : Union[str, Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowerCamelCase : Any = [3, 3, 3, 3]
if "lrf" in model_name:
_lowerCamelCase : Optional[Any] = [3, 3, 3, 3]
else:
_lowerCamelCase : Any = [2, 2, 2, 2]
if "tiny" in model_name:
_lowerCamelCase : Dict = 96
elif "small" in model_name:
_lowerCamelCase : Any = 96
elif "base" in model_name:
_lowerCamelCase : Union[str, Any] = 128
elif "large" in model_name:
_lowerCamelCase : List[Any] = 192
elif "xlarge" in model_name:
_lowerCamelCase : List[Any] = 256
elif "huge" in model_name:
_lowerCamelCase : Optional[Any] = 352
# set label information
_lowerCamelCase : Dict = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_lowerCamelCase : Optional[Any] = '''imagenet-22k-id2label.json'''
else:
_lowerCamelCase : Union[str, Any] = '''imagenet-1k-id2label.json'''
_lowerCamelCase : Tuple = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Dict = {int(a_ ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Any = FocalNetConfig(
embed_dim=a_ , depths=a_ , focal_levels=a_ , focal_windows=a_ , use_conv_embed=a_ , idalabel=a_ , labelaid=a_ , use_post_layernorm=a_ , use_layerscale=a_ , )
return config
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if "patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_lowerCamelCase : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
_lowerCamelCase : Any = '''encoder.''' + name
if "encoder.layers" in name:
_lowerCamelCase : Any = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
_lowerCamelCase : Optional[int] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
_lowerCamelCase : int = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowerCamelCase : int = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowerCamelCase : Union[str, Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowerCamelCase : Tuple = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
_lowerCamelCase : Tuple = '''layernorm.weight'''
if name == "norm.bias":
_lowerCamelCase : List[Any] = '''layernorm.bias'''
if "head" in name:
_lowerCamelCase : List[Any] = name.replace("head" , "classifier" )
else:
_lowerCamelCase : Tuple = '''focalnet.''' + name
return name
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_lowerCamelCase : Optional[int] = model_name_to_url[model_name]
print("Checkpoint URL: " , a_ )
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" )['''model''']
# rename keys
for key in state_dict.copy().keys():
_lowerCamelCase : Union[str, Any] = state_dict.pop(a_ )
_lowerCamelCase : Union[str, Any] = val
_lowerCamelCase : Optional[int] = get_focalnet_config(a_ )
_lowerCamelCase : Union[str, Any] = FocalNetForImageClassification(a_ )
model.eval()
# load state dict
model.load_state_dict(a_ )
# verify conversion
_lowerCamelCase : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : Tuple = BitImageProcessor(
do_resize=a_ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=a_ , crop_size=224 , do_normalize=a_ , image_mean=a_ , image_std=a_ , )
_lowerCamelCase : Dict = Image.open(requests.get(a_ , stream=a_ ).raw )
_lowerCamelCase : Union[str, Any] = processor(images=a_ , return_tensors="pt" )
_lowerCamelCase : Dict = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowerCamelCase : List[str] = image_transforms(a_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , a_ , atol=1E-4 )
_lowerCamelCase : Optional[Any] = model(**a_ )
_lowerCamelCase : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowerCamelCase : Optional[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
_lowerCamelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
_lowerCamelCase : Any = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
_lowerCamelCase : List[str] = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
_lowerCamelCase : Any = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
_lowerCamelCase : Dict = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 712 |
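# Hedged usage sketch: the conversion script above is meant to be run from the command line.
# Assuming it is saved as convert_focalnet.py (the file name is our assumption), an invocation
# could look like:
#   python convert_focalnet.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny-hf
# The dumped folder can then be reloaded with the generic auto classes:
from transformers import AutoImageProcessor, AutoModelForImageClassification
def reload_converted(folder: str = "./focalnet-tiny-hf"):
    image_processor = AutoImageProcessor.from_pretrained(folder)
    model = AutoModelForImageClassification.from_pretrained(folder)
    return image_processor, model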
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
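# Hedged usage sketch: a minimal round-trip with the fast Blenderbot tokenizer defined above; the
# checkpoint name comes from the pretrained maps in the snippet.
from transformers import BlenderbotTokenizerFast
def demo_blenderbot_tokenizer() -> str:
    tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    ids = tok(" My friends are cool but they eat too many carbs.").input_ids
    # build_inputs_with_special_tokens above only appends </s>, so this should recover the sentence.
    return tok.decode(ids, skip_special_tokens=True)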
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Optional[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( _a ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Any,__A : int=None,__A : Optional[int]=None,__A : List[str]="<|endoftext|>",__A : Optional[int]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : str=False,__A : Union[str, Any]=True,**__A : Dict,):
super().__init__(
ByteLevelBPETokenizer(
vocab=_A,merges=_A,add_prefix_space=_A,trim_offsets=_A,),bos_token=_A,eos_token=_A,unk_token=_A,**_A,)
_lowerCamelCase : Dict = add_prefix_space
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : int=None ):
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : int,__A : Dict,__A : int = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 713 |
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """simple docstring"""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
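# Hedged self-check (assumes the definitions above are importable in the same module under the
# names used in its __main__ block, i.e. equation / bisection): for the bracket (0, 6) the loop
# tolerance is 0.01, so the returned value should land within 0.01 of sqrt(10).
import math
def verify_bisection() -> float:
    root = bisection(0, 6)
    assert abs(root - math.sqrt(10)) < 0.01
    return root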
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
debug_launcher(test_script.main )
def lowerCamelCase_ ( self : Tuple ):
debug_launcher(test_ops.main ) | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
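# Hedged sketch (self-contained toy, not the script's exact code path): the exact-deduplication
# step above boils down to hashing the whitespace-normalized "content" field and keeping the first
# occurrence of each hash; the near-dedup/minhash stage is not reproduced here.
import hashlib
import re
from datasets import Dataset
_WS = re.compile(r"\s+")
def content_hash(example: dict) -> dict:
    return {"hash": hashlib.md5(_WS.sub("", example["content"]).encode("utf-8")).hexdigest()}
def exact_dedup(ds: Dataset) -> Dataset:
    ds = ds.map(content_hash)
    seen = set()
    def keep_first(example):
        if example["hash"] in seen:
            return False
        seen.add(example["hash"])
        return True
    return ds.filter(keep_first)
if __name__ == "__main__":
    toy = Dataset.from_dict({"content": ["a = 1", "a  =  1", "b = 2"]})
    print(exact_dedup(toy)["content"])  # whitespace-insensitive duplicates collapse to one row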
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 715 |
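# Hedged usage sketch: the module above only wires up the optional imports; a typical text-to-3D
# call with the exported ShapEPipeline is sketched here. The checkpoint id and the generation
# arguments are assumptions, not taken from the snippet.
from diffusers import ShapEPipeline
def render_text_to_3d(prompt: str = "a shark"):
    pipe = ShapEPipeline.from_pretrained("openai/shap-e")
    output = pipe(prompt, guidance_scale=15.0, num_inference_steps=64, frame_size=64)
    return output.images  # one list of rendered frames per prompt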
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
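# Hedged usage sketch: each item above is a dict of ready-made tensors, so the dataset can feed a
# torch DataLoader directly. It assumes the classes above are exposed under their upstream names
# (SquadDataTrainingArguments / SquadDataset); the tokenizer checkpoint and data_dir are placeholders.
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, SquadDataset, SquadDataTrainingArguments
def build_squad_train_loader(data_dir: str = "./squad_data", batch_size: int = 8) -> DataLoader:
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = SquadDataTrainingArguments(model_type="bert", data_dir=data_dir)
    dataset = SquadDataset(args, tokenizer, mode="train")
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)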
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( __UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCamelCase_ ( self : List[Any] ):
super().setUp()
_lowerCamelCase : Any = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
_lowerCamelCase : List[str] = "UNwant\u00E9d,running"
_lowerCamelCase : List[str] = "unwanted, running"
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
_lowerCamelCase : Dict = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__A,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),[9, 6, 7, 1_2, 1_0, 1_1] )
def lowerCamelCase_ ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : str = self.get_rust_tokenizer()
_lowerCamelCase : Optional[Any] = "UNwant\u00E9d,running"
_lowerCamelCase : str = tokenizer.tokenize(__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Optional[int] = tokenizer.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Any = self.get_rust_tokenizer()
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : List[Any] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# With lower casing
_lowerCamelCase : Optional[int] = self.get_tokenizer(do_lower_case=__A )
_lowerCamelCase : Tuple = self.get_rust_tokenizer(do_lower_case=__A )
_lowerCamelCase : List[str] = "UNwant\u00E9d,running"
_lowerCamelCase : Tuple = tokenizer.tokenize(__A )
_lowerCamelCase : Union[str, Any] = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : int = tokenizer.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Optional[Any] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : List[Any] = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = tokenizer.encode(__A )
_lowerCamelCase : Any = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ),["ah", "\u535A", "\u63A8", "zz"] )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ),["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ),["hello"] )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Dict = BasicTokenizer(do_lower_case=__A,strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ),["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ),["h\u00E9llo"] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = BasicTokenizer(do_lower_case=__A,strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ),["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ),["hello"] )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ),["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ),["hello"] )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ),["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = BasicTokenizer(do_lower_case=__A,strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ),["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : int = BasicTokenizer(do_lower_case=__A,strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ),["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = BasicTokenizer(do_lower_case=__A,never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ),["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = BasicTokenizer()
_lowerCamelCase : List[Any] = "a\n\'ll !!to?\'d of, can\'t."
_lowerCamelCase : Any = ["a", "\'", "ll", "!", "!", "to", "?", "\'", "d", "of", ",", "can", "\'", "t", "."]
self.assertListEqual(tokenizer.tokenize(__A ),__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_lowerCamelCase : int = {}
for i, token in enumerate(__A ):
_lowerCamelCase : Optional[int] = i
_lowerCamelCase : int = WordpieceTokenizer(vocab=__A,unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ),[] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ),["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ),["[UNK]", "runn", "##ing"] )
def lowerCamelCase_ ( self : List[Any] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowerCamelCase_ ( self : Dict ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]],[["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["Test", "\xad", "test"]],[["[UNK]"], [], ["[UNK]"]] )
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Dict = tokenizer.encode("sequence builders",add_special_tokens=__A )
_lowerCamelCase : List[Any] = tokenizer.encode("multi-sequence build",add_special_tokens=__A )
_lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(__A )
_lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(__A,__A )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def lowerCamelCase_ ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : List[str] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_lowerCamelCase : Any = tokenizer_r.encode_plus(
__A,return_attention_mask=__A,return_token_type_ids=__A,return_offsets_mapping=__A,add_special_tokens=__A,)
_lowerCamelCase : str = tokenizer_r.do_lower_case if hasattr(__A,"do_lower_case" ) else False
_lowerCamelCase : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results],tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results],tokens["offset_mapping"] )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Any = ["的", "人", "有"]
_lowerCamelCase : Union[str, Any] = "".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : str = True
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : Tuple = tokenizer_p.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Dict = tokenizer_r.convert_ids_to_tokens(__A )
_lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A,__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Any = False
_lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : Optional[int] = tokenizer_r.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Any = tokenizer_p.encode(__A,add_special_tokens=__A )
_lowerCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__A )
_lowerCamelCase : int = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCamelCase : Dict = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A,__A )
self.assertListEqual(__A,__A ) | 716 |
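# Hedged sketch (standalone illustration, not the library implementation): the WordpieceTokenizer
# cases above rely on greedy longest-match-first segmentation with "##" continuation pieces, and
# fall back to the unknown token when any span of a word cannot be matched.
def greedy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]  # any unmatchable span makes the whole word unknown
        pieces.append(match)
        start = end
    return pieces
if __name__ == "__main__":
    demo_vocab = {"un", "##want", "##ed", "runn", "##ing"}
    assert greedy_wordpiece("unwanted", demo_vocab) == ["un", "##want", "##ed"]
    assert greedy_wordpiece("unwantedX", demo_vocab) == ["[UNK]"]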
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
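# Hedged usage sketch: a minimal sentence-pair encode with the fast ALBERT tokenizer defined above;
# the checkpoint name is one of the entries in its pretrained maps.
from transformers import AlbertTokenizerFast
def demo_albert_pair():
    tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    enc = tok("first sentence", "second sentence")
    # token_type_ids follow create_token_type_ids_from_sequences above: 0s for the first segment
    # (including [CLS] and its [SEP]) and 1s for the second segment.
    return enc["input_ids"], enc["token_type_ids"]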
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
UpperCAmelCase_ : List[Any] = "1"
UpperCAmelCase_ : Optional[int] = "0"
UpperCAmelCase_ : str = "1"
UpperCAmelCase_ : str = ort.SessionOptions()
UpperCAmelCase_ : str = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
UpperCAmelCase_ : Union[str, Any] = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
UpperCAmelCase_ : Optional[int] = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
UpperCAmelCase_ : str = ort.RunOptions()
UpperCAmelCase_ : Optional[int] = 128
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[str] = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ : List[Any] = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase_ : List[str] = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
UpperCAmelCase_ : int = time.time()
UpperCAmelCase_ : Any = 2000
UpperCAmelCase_ : Union[str, Any] = {}
for iter in range(max_iters):
UpperCAmelCase_ : Union[str, Any] = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters)) | 717 |
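# Hedged sketch: the loop above reports only the mean latency; collecting per-call timings also
# exposes tail latency. The helper below is generic -- it takes any ort.InferenceSession plus a
# prepared feed dict, so it does not depend on the exact inputs built above.
import time
import numpy as np
def latency_percentiles(session, feed, iters: int = 200) -> dict:
    samples = []
    for _ in range(iters):
        tic = time.perf_counter()
        session.run(None, feed)
        samples.append((time.perf_counter() - tic) * 1000.0)
    return {"p50_ms": float(np.percentile(samples, 50)), "p95_ms": float(np.percentile(samples, 95))}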
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCAmelCase_ : List[str] = 'src/transformers'
# Matches is_xxx_available()
UpperCAmelCase_ : Optional[int] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
UpperCAmelCase_ : Tuple = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ : int = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ : List[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ : Optional[int] = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ : Dict = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ : Union[str, Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
UpperCAmelCase_ : List[Any] = re.compile(R'^\s*try:')
# Catches a line with else:
UpperCAmelCase_ : List[Any] = re.compile(R'^\s*else:')
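# Walks every __init__.py under src/transformers, parses the _import_structure dict and
# the TYPE_CHECKING imports with the regexes above, and reports objects registered on
# only one side as well as submodules missing from the main init.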
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if _re_test_backend.search(snake_case_ ) is None:
return None
_lowerCamelCase : Optional[Any] = [b[0] for b in _re_backend.findall(snake_case_ )]
backends.sort()
return "_and_".join(snake_case_ )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with open(snake_case_ , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : str = f.readlines()
_lowerCamelCase : Union[str, Any] = 0
while line_index < len(snake_case_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case_ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCamelCase : int = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_lowerCamelCase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case_ ):
_lowerCamelCase : Optional[int] = _re_one_line_import_struct.search(snake_case_ ).groups()[0]
_lowerCamelCase : Optional[Any] = re.findall("\[([^\]]+)\]" , snake_case_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_lowerCamelCase : Optional[Any] = _re_import_struct_key_value.search(snake_case_ )
if single_line_import_search is not None:
_lowerCamelCase : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_lowerCamelCase : Dict = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCamelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_lowerCamelCase : int = lines[line_index]
if _re_import_struct_add_one.search(snake_case_ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case_ ) is not None:
_lowerCamelCase : Tuple = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(", " )
_lowerCamelCase : List[str] = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_between_brackets.search(snake_case_ ) is not None:
_lowerCamelCase : List[Any] = _re_between_brackets.search(snake_case_ ).groups()[0].split(", " )
_lowerCamelCase : int = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_quote_object.search(snake_case_ ) is not None:
objects.append(_re_quote_object.search(snake_case_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_lowerCamelCase : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCamelCase : Dict = []
while (
line_index < len(snake_case_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_lowerCamelCase : Tuple = lines[line_index]
_lowerCamelCase : Any = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCamelCase : str = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCamelCase : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_lowerCamelCase : Dict = lines[line_index]
_lowerCamelCase : Optional[int] = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCamelCase : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
def find_duplicates(_lowerCAmelCase : List[str] ):
return [k for k, v in collections.Counter(snake_case_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCamelCase : Optional[int] = []
for key in import_dict_objects.keys():
_lowerCamelCase : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
_lowerCamelCase : Union[str, Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCamelCase : Any = '''base imports''' if key == '''none''' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Dict = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
_lowerCamelCase : List[Any] = os.path.join(snake_case_ , "__init__.py" )
_lowerCamelCase : List[Any] = parse_init(snake_case_ )
if objects is not None:
_lowerCamelCase : int = analyze_results(*snake_case_ )
if len(snake_case_ ) > 0:
_lowerCamelCase : Dict = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(snake_case_ ) )
if len(snake_case_ ) > 0:
raise ValueError("\n\n".join(snake_case_ ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = []
for path, directories, files in os.walk(snake_case_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(snake_case_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case_ ) / folder).glob("*.py" ) ) ) == 0:
continue
_lowerCamelCase : str = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) )
_lowerCamelCase : Optional[int] = short_path.replace(os.path.sep , "." )
submodules.append(snake_case_ )
for fname in files:
if fname == "__init__.py":
continue
_lowerCamelCase : Optional[Any] = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) )
_lowerCamelCase : Dict = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(snake_case_ )
return submodules
UpperCAmelCase_ : Optional[Any] = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = importlib.util.spec_from_file_location(
"transformers" , os.path.join(snake_case_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowerCamelCase : str = spec.loader.load_module()
_lowerCamelCase : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case_ ) > 0:
_lowerCamelCase : Optional[Any] = '''\n'''.join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
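# SortedLinkedList sorts its input inside the constructor, so merge_lists below simply
# concatenates the two inputs and rebuilds the structure to obtain a merged sorted list.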
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
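# Model tester plus the common-test harness for the DistilBERT heads; the GPU test
# further checks that a torch.jit traced model can be saved, reloaded and executed.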
class UpperCAmelCase__ ( lowercase_ ):
def __init__( self : List[str],__A : List[Any],__A : str=1_3,__A : List[Any]=7,__A : List[Any]=True,__A : Dict=True,__A : int=False,__A : List[str]=True,__A : Optional[int]=9_9,__A : Optional[Any]=3_2,__A : Any=5,__A : Union[str, Any]=4,__A : Optional[int]=3_7,__A : Union[str, Any]="gelu",__A : Any=0.1,__A : List[Any]=0.1,__A : Union[str, Any]=5_1_2,__A : str=1_6,__A : Optional[int]=2,__A : str=0.02,__A : Any=3,__A : Union[str, Any]=4,__A : List[Any]=None,):
_lowerCamelCase : str = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : str = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : str = use_input_mask
_lowerCamelCase : int = use_token_type_ids
_lowerCamelCase : str = use_labels
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Union[str, Any] = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : Any = scope
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[str] = None
if self.use_input_mask:
_lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : int = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int ):
return DistilBertConfig(
vocab_size=self.vocab_size,dim=self.hidden_size,n_layers=self.num_hidden_layers,n_heads=self.num_attention_heads,hidden_dim=self.intermediate_size,hidden_act=self.hidden_act,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,)
def lowerCamelCase_ ( self : str,__A : List[Any],__A : List[str],__A : Any,__A : str,__A : str,__A : Dict ):
_lowerCamelCase : List[str] = DistilBertModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A,__A )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : str,__A : List[str],__A : Dict,__A : List[str],__A : int,__A : Tuple,__A : Dict ):
_lowerCamelCase : List[Any] = DistilBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Any,__A : Tuple,__A : Any,__A : str,__A : str ):
_lowerCamelCase : Union[str, Any] = DistilBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Any = model(
__A,attention_mask=__A,start_positions=__A,end_positions=__A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : int,__A : Optional[Any],__A : str,__A : str,__A : List[str],__A : Union[str, Any],__A : int ):
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : str = DistilBertForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : int = model(__A,attention_mask=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int],__A : int,__A : Tuple,__A : List[str],__A : Tuple,__A : List[Any],__A : Dict ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : int = DistilBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str,__A : str,__A : str,__A : str,__A : str,__A : Union[str, Any],__A : List[Any] ):
_lowerCamelCase : Any = self.num_choices
_lowerCamelCase : int = DistilBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : Any = model(
__A,attention_mask=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : List[str] = config_and_inputs
_lowerCamelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase_ = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = DistilBertModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self,config_class=__A,dim=3_7 )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
@slow
def lowerCamelCase_ ( self : Dict ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = DistilBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowerCamelCase : List[str] = True
_lowerCamelCase : int = model_class(config=__A )
_lowerCamelCase : str = self._prepare_for_class(__A,__A )
_lowerCamelCase : Dict = torch.jit.trace(
__A,(inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A,os.path.join(__A,"traced_model.pt" ) )
_lowerCamelCase : Any = torch.jit.load(os.path.join(__A,"traced_model.pt" ),map_location=__A )
loaded(inputs_dict["input_ids"].to(__A ),inputs_dict["attention_mask"].to(__A ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Any = DistilBertModel.from_pretrained("distilbert-base-uncased" )
_lowerCamelCase : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : List[Any] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape,__A )
_lowerCamelCase : Optional[Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
import cva
import numpy as np
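# Minimal Harris corner detector: for each pixel a 2x2 structure tensor is built from
# windowed sums of Ix^2, Iy^2 and Ix*Iy, scored with R = det - k * trace^2, and pixels
# whose response exceeds the threshold are collected and highlighted in the output image.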
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : float,__A : int ):
if k in (0.04, 0.06):
_lowerCamelCase : Optional[int] = k
_lowerCamelCase : int = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : List[str] ):
return str(self.k )
def lowerCamelCase_ ( self : Optional[int],__A : str ):
_lowerCamelCase : int = cva.imread(__A,0 )
_lowerCamelCase , _lowerCamelCase : List[Any] = img.shape
_lowerCamelCase : str = []
_lowerCamelCase : Tuple = img.copy()
_lowerCamelCase : List[Any] = cva.cvtColor(__A,cva.COLOR_GRAY2RGB )
_lowerCamelCase , _lowerCamelCase : Tuple = np.gradient(__A )
_lowerCamelCase : Tuple = dx**2
_lowerCamelCase : Tuple = dy**2
_lowerCamelCase : List[str] = dx * dy
_lowerCamelCase : Optional[int] = 0.04
_lowerCamelCase : Tuple = self.window_size // 2
for y in range(__A,h - offset ):
for x in range(__A,w - offset ):
_lowerCamelCase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : Tuple = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : List[str] = (wxx * wyy) - (wxy**2)
_lowerCamelCase : Dict = wxx + wyy
_lowerCamelCase : Optional[int] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0),0 )
color_img.itemset((y, x, 1),0 )
color_img.itemset((y, x, 2),2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase_ : Any = HarrisCorner(0.04, 3)
UpperCAmelCase_, UpperCAmelCase_ : str = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
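# Runs every FlaxAlbert* head through the shared Flax model tests and adds a slow
# integration check of albert-base-v2 hidden states against reference values.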
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import argparse
import json
import subprocess
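# Queries the GitHub Actions API for the repository's self-hosted runners, writes any
# target runner reported as "offline" to offline_runners.txt and raises if the list is
# non-empty so the calling workflow fails visibly.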
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[Any] = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
_lowerCamelCase : Dict = subprocess.run(_lowerCAmelCase , shell=_lowerCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase : Any = output.stdout.decode("utf-8" )
_lowerCamelCase : Any = json.loads(_lowerCAmelCase )
_lowerCamelCase : Any = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCAmelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Union[str, Any] = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return values.split("," )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
UpperCAmelCase_ : str = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
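# Helpers to stream a T5X/Flax Switch Transformers checkpoint out of tensorstore and
# re-save it as sharded PyTorch weight files: a new shard is started whenever the
# current one would exceed max_shard_size, and a weight-map index is written last.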
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
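# Two variants of Prim's minimum spanning tree algorithm over the Vertex class below:
# a simple O(V^2) version that scans the remaining vertices for the minimum key, and a
# heapq-based version that yields the MST edges lazily.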
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : Any ):
_lowerCamelCase : Any = str(id_ )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Optional[Any] = {} # {vertex:distance}
def __lt__( self : int,__A : List[Any] ):
return self.key < other.key
def __repr__( self : List[str] ):
return self.id
def lowerCamelCase_ ( self : List[str],__A : Optional[int] ):
self.neighbors.append(__A )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : int ):
_lowerCamelCase : Optional[Any] = weight
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list , _lowerCAmelCase : Vertex ):
"""simple docstring"""
_lowerCamelCase : str = []
for u in graph:
_lowerCamelCase : Any = math.inf
_lowerCamelCase : List[str] = None
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Tuple = graph[:]
while q:
_lowerCamelCase : List[str] = min(_lowerCAmelCase )
q.remove(_lowerCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCamelCase : int = u
_lowerCamelCase : Optional[Any] = u.edges[v.id]
for i in range(1 , len(_lowerCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def A_ ( _lowerCAmelCase : list , _lowerCAmelCase : Vertex ):
"""simple docstring"""
for u in graph:
_lowerCamelCase : Any = math.inf
_lowerCamelCase : Tuple = None
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = list(_lowerCAmelCase )
hq.heapify(_lowerCAmelCase )
while h:
_lowerCamelCase : Dict = hq.heappop(_lowerCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCamelCase : List[Any] = u
_lowerCamelCase : List[str] = u.edges[v.id]
hq.heapify(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
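    # For a cuboid a x b x c with the longest side fixed, the shortest surface path is
    # sqrt((a + b)^2 + c^2); for every integer value of a + b that gives an integer path
    # length, count the admissible (a, b) splits and grow c until the limit is exceeded.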
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[str] = 3
class UpperCAmelCase__ ( A ):
pass
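# Sanity check for datasets.distributed.split_dataset_by_node: each worker builds the
# sharded dataset, keeps only its node's slice and verifies that the number of rows it
# iterates over matches the expected per-rank share of the full dataset.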
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
for shard in shards:
for i in range(_lowerCAmelCase ):
yield {"i": i, "shard": shard}
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = int(os.environ["RANK"] )
_lowerCamelCase : List[str] = int(os.environ["WORLD_SIZE"] )
_lowerCamelCase : Optional[Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCAmelCase )
parser.add_argument("--local_rank" , type=_lowerCAmelCase )
parser.add_argument("--num_workers" , type=_lowerCAmelCase , default=0 )
_lowerCamelCase : Optional[int] = parser.parse_args()
_lowerCamelCase : Tuple = args.streaming
_lowerCamelCase : Any = args.num_workers
_lowerCamelCase : Tuple = {"shards": [F'shard_{shard_idx}' for shard_idx in range(_lowerCAmelCase )]}
_lowerCamelCase : int = IterableDataset.from_generator(_lowerCAmelCase , gen_kwargs=_lowerCAmelCase )
if not streaming:
_lowerCamelCase : List[str] = Dataset.from_list(list(_lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = split_dataset_by_node(_lowerCAmelCase , rank=_lowerCAmelCase , world_size=_lowerCAmelCase )
_lowerCamelCase : List[str] = torch.utils.data.DataLoader(_lowerCAmelCase , num_workers=_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
_lowerCamelCase : Optional[int] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
_lowerCamelCase : Union[str, Any] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main() | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(_lowerCAmelCase , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
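# Thin processor that routes images (and optional mixed images) to TvltImageProcessor
# and audio to TvltFeatureExtractor, merging all resulting tensors into one dict.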
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor', 'feature_extractor']
lowerCAmelCase_ = 'TvltImageProcessor'
lowerCAmelCase_ = 'TvltFeatureExtractor'
def __init__( self : Any,__A : List[Any],__A : str ):
super().__init__(image_processor=__A,feature_extractor=__A )
_lowerCamelCase : Dict = image_processor
_lowerCamelCase : List[str] = feature_extractor
def __call__( self : List[Any],__A : Optional[int]=None,__A : List[Any]=None,__A : List[str]=None,__A : int=None,__A : Tuple=False,__A : Dict=False,*__A : List[Any],**__A : Optional[Any],):
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
_lowerCamelCase : List[Any] = None
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__A,mask_pixel=__A,*__A,**__A )
if images_mixed is not None:
_lowerCamelCase : Dict = self.image_processor(__A,is_mixed=__A,*__A,**__A )
if audio is not None:
_lowerCamelCase : Optional[int] = self.feature_extractor(
__A,*__A,sampling_rate=__A,mask_audio=__A,**__A )
_lowerCamelCase : Dict = {}
if audio is not None:
output_dict.update(__A )
if images is not None:
output_dict.update(__A )
if images_mixed_dict is not None:
output_dict.update(__A )
return output_dict
@property
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.image_processor.model_input_names
_lowerCamelCase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
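# Image-conditioned Shap-E pipeline: a CLIP vision encoder embeds the input image, the
# prior denoises a latent with optional classifier-free guidance, and the renderer
# decodes each latent into a batch of rendered frames.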
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
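# Denoising loop: optionally duplicate the latents for classifier-free guidance, predict the image embedding with the prior, and let the scheduler step the latents.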
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(image ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
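# Shared check: every loaded text dataset should expose four rows, a single "text" column, and the expected feature dtypes.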
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "cache"
_lowerCamelCase : List[str] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Tuple = {"text": "string"}
_lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features
_lowerCamelCase : List[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Any = TextDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = tmp_path / "cache"
_lowerCamelCase : List[Any] = {"text": "string"}
_lowerCamelCase : Dict = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Dict = text_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : int = [text_path]
_lowerCamelCase : List[Any] = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Union[str, Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=("train",) ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
_lowerCamelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = tmp_path / "cache"
_lowerCamelCase : Dict = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Dict = TextDatasetReader({"train": text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tmp_path / "cache"
# the "text" column defaults to the string dtype
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Optional[int] = features.copy() if features else default_expected_features
_lowerCamelCase : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Tuple = TextDatasetReader({"train": text_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if split:
_lowerCamelCase : Tuple = {split: text_path}
else:
_lowerCamelCase : List[str] = "train"
_lowerCamelCase : List[str] = {"train": text_path, "test": text_path}
_lowerCamelCase : str = tmp_path / "cache"
_lowerCamelCase : int = {"text": "string"}
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() ) | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
UpperCAmelCase_ : Tuple = data_utils.TransfoXLTokenizer
UpperCAmelCase_ : Any = data_utils.TransfoXLCorpus
UpperCAmelCase_ : Union[str, Any] = data_utils
UpperCAmelCase_ : int = data_utils
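# The converter below handles two inputs: a pre-processed TransfoXL corpus pickle (saved as vocab + dataset caches) and/or a TensorFlow checkpoint (converted to a PyTorch model + config).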
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_lowerCAmelCase , "rb" ) as fp:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
_lowerCamelCase : Any = corpus.vocab.__dict__
torch.save(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , _lowerCAmelCase )
_lowerCamelCase : Tuple = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_lowerCamelCase : Union[str, Any] = os.path.abspath(_lowerCAmelCase )
_lowerCamelCase : Any = os.path.abspath(_lowerCAmelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_lowerCamelCase : List[Any] = TransfoXLConfig()
else:
_lowerCamelCase : List[str] = TransfoXLConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
_lowerCamelCase : List[str] = TransfoXLLMHeadModel(_lowerCAmelCase )
_lowerCamelCase : List[str] = load_tf_weights_in_transfo_xl(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(_lowerCAmelCase )}' )
torch.save(model.state_dict() , _lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(_lowerCAmelCase )}' )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
UpperCAmelCase_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
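# The test mixin below runs the shared Flax model tests over every RoBERTa head and checks that the pretrained checkpoint loads.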
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
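# Re-sync the fast backend's pre-tokenizer and post-processor with the requested `add_prefix_space` / `trim_offsets` settings.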
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
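# Special-token layout: a single sequence becomes <bos> tokens <eos>; a pair appends <eos> second_tokens <eos>.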
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : bool = True , _lowerCAmelCase : float = math.inf , _lowerCAmelCase : float = -math.inf , _lowerCAmelCase : float = math.inf , _lowerCAmelCase : float = -math.inf , _lowerCAmelCase : bool = False , _lowerCAmelCase : float = 100 , _lowerCAmelCase : float = 0.0_1 , _lowerCAmelCase : float = 1 , ):
"""simple docstring"""
_lowerCamelCase : int = False
_lowerCamelCase : Optional[int] = search_prob
_lowerCamelCase : Dict = start_temperate
_lowerCamelCase : List[str] = []
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : int = None
while not search_end:
_lowerCamelCase : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCamelCase : Tuple = current_state
scores.append(_lowerCAmelCase )
iterations += 1
_lowerCamelCase : str = None
_lowerCamelCase : List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor we can move to (or run out of neighbors)
_lowerCamelCase : Dict = random.randint(0 , len(_lowerCAmelCase ) - 1 ) # picking a random neighbor
_lowerCamelCase : Union[str, Any] = neighbors.pop(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCamelCase : Tuple = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCamelCase : str = picked_neighbor
else:
_lowerCamelCase : Dict = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCamelCase : str = picked_neighbor
_lowerCamelCase : str = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCamelCase : Tuple = True
else:
_lowerCamelCase : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowerCAmelCase ) , _lowerCAmelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase_ : List[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase_ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase_ : Dict = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
UpperCAmelCase_ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase_ : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'''{local_min.score()}'''
)
UpperCAmelCase_ : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase_ : str = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f'''{local_min.score()}'''
) | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
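# step() updates the exponential moving average in place: shadow -= (1 - decay) * (shadow - param); parameters that do not require grad are copied directly.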
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
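# InstructBLIP is described by three nested configs: a ViT-style vision encoder, a Q-Former bridge, and a language-model config, combined in InstructBlipConfig below.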
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip_vision_model'
def __init__( self : List[Any],__A : Dict=1_4_0_8,__A : Dict=6_1_4_4,__A : Any=3_9,__A : Optional[int]=1_6,__A : Any=2_2_4,__A : List[str]=1_4,__A : Any="gelu",__A : int=1e-6,__A : str=0.0,__A : Dict=1e-10,__A : List[Any]=True,**__A : List[Any],):
super().__init__(**__A )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[int] = qkv_bias
@classmethod
def lowerCamelCase_ ( cls : Dict,__A : Union[str, os.PathLike],**__A : List[Any] ):
cls._set_token_in_kwargs(__A )
_lowerCamelCase : Tuple = cls.get_config_dict(__A,**__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_lowerCamelCase : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A,**__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip_qformer'
def __init__( self : int,__A : Union[str, Any]=3_0_5_2_2,__A : Any=7_6_8,__A : Union[str, Any]=1_2,__A : List[str]=1_2,__A : Optional[Any]=3_0_7_2,__A : Dict="gelu",__A : Tuple=0.1,__A : str=0.1,__A : Union[str, Any]=5_1_2,__A : Optional[int]=0.02,__A : List[Any]=1e-12,__A : str=0,__A : Union[str, Any]="absolute",__A : str=2,__A : List[Any]=1_4_0_8,**__A : Optional[Any],):
super().__init__(pad_token_id=__A,**__A )
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Optional[Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : Optional[int] = cross_attention_frequency
_lowerCamelCase : Dict = encoder_hidden_size
@classmethod
def lowerCamelCase_ ( cls : Dict,__A : Union[str, os.PathLike],**__A : Tuple ):
cls._set_token_in_kwargs(__A )
_lowerCamelCase : Optional[int] = cls.get_config_dict(__A,**__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_lowerCamelCase : Union[str, Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A,**__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'instructblip'
lowerCAmelCase_ = True
def __init__( self : Any,__A : Tuple=None,__A : Dict=None,__A : Union[str, Any]=None,__A : Dict=3_2,**__A : Tuple ):
super().__init__(**__A )
if vision_config is None:
_lowerCamelCase : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_lowerCamelCase : Optional[int] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_lowerCamelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_lowerCamelCase : Dict = InstructBlipVisionConfig(**__A )
_lowerCamelCase : List[Any] = InstructBlipQFormerConfig(**__A )
_lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
_lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**__A )
_lowerCamelCase : Any = self.text_config.tie_word_embeddings
_lowerCamelCase : Optional[int] = self.text_config.is_encoder_decoder
_lowerCamelCase : Optional[int] = num_query_tokens
_lowerCamelCase : Optional[int] = self.vision_config.hidden_size
_lowerCamelCase : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCamelCase : Optional[Any] = 1.0
_lowerCamelCase : int = 0.02
@classmethod
def lowerCamelCase_ ( cls : Optional[int],__A : InstructBlipVisionConfig,__A : InstructBlipQFormerConfig,__A : PretrainedConfig,**__A : Tuple,):
return cls(
vision_config=vision_config.to_dict(),qformer_config=qformer_config.to_dict(),text_config=text_config.to_dict(),**__A,)
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : str = self.vision_config.to_dict()
_lowerCamelCase : List[str] = self.qformer_config.to_dict()
_lowerCamelCase : Tuple = self.text_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
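# load_checkpoint strips fairseq-only keys, renames projection/layer-norm weights, and splits the fused qkv_proj tensors into separate q/k/v projections.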
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` has its QKV weight separated into K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class UpperCAmelCase__ :
def __init__( self : Union[str, Any],__A : List[str],__A : Union[str, Any]=1_3,__A : str=7,__A : List[Any]=6,__A : Optional[Any]=1_7,__A : Tuple=2_3,__A : int=1_1,__A : Optional[int]=True,):
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : Dict = seq_length
_lowerCamelCase : List[Any] = act_dim
_lowerCamelCase : Dict = state_dim
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Optional[int] = max_length
_lowerCamelCase : int = is_training
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_lowerCamelCase : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_lowerCamelCase : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCamelCase : List[Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, self.seq_length),vocab_size=1_0_0_0 )
_lowerCamelCase : Dict = random_attention_mask((self.batch_size, self.seq_length) )
_lowerCamelCase : Optional[Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase_ ( self : int ):
return DecisionTransformerConfig(
batch_size=self.batch_size,seq_length=self.seq_length,act_dim=self.act_dim,state_dim=self.state_dim,hidden_size=self.hidden_size,max_length=self.max_length,)
def lowerCamelCase_ ( self : int,__A : List[str],__A : Union[str, Any],__A : Dict,__A : Any,__A : List[str],__A : Any,__A : str,):
_lowerCamelCase : Optional[int] = DecisionTransformerModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Any = model(__A,__A,__A,__A,__A,__A )
self.parent.assertEqual(result.state_preds.shape,states.shape )
self.parent.assertEqual(result.action_preds.shape,actions.shape )
self.parent.assertEqual(result.return_preds.shape,returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Optional[int] = config_and_inputs
_lowerCamelCase : List[str] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , A , unittest.TestCase ):
lowerCAmelCase_ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase_ = ()
lowerCAmelCase_ = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase_ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Union[str, Any] = DecisionTransformerModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Tuple ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = DecisionTransformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Tuple = [*signature.parameters.keys()]
_lowerCamelCase : List[str] = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(__A )],__A )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : str = 2 # number of steps of autoregressive prediction we will perform
_lowerCamelCase : Tuple = 1_0 # defined by the RL environment, may be normalized
_lowerCamelCase : List[str] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
_lowerCamelCase : str = model.to(__A )
_lowerCamelCase : Optional[Any] = model.config
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = torch.randn(1,1,config.state_dim ).to(device=__A,dtype=torch.floataa ) # env.reset()
_lowerCamelCase : List[Any] = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]],device=__A )
_lowerCamelCase : Dict = torch.tensor(__A,device=__A,dtype=torch.floataa ).reshape(1,1,1 )
_lowerCamelCase : int = state
_lowerCamelCase : int = torch.zeros(1,0,config.act_dim,device=__A,dtype=torch.floataa )
_lowerCamelCase : List[Any] = torch.zeros(1,0,device=__A,dtype=torch.floataa )
_lowerCamelCase : int = torch.tensor(0,device=__A,dtype=torch.long ).reshape(1,1 )
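# Autoregressive rollout: pad actions/rewards with zeros, predict the next action, compare it against the expected values, then append the (fake) environment step to the context.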
for step in range(__A ):
_lowerCamelCase : Any = torch.cat([actions, torch.zeros(1,1,config.act_dim,device=__A )],dim=1 )
_lowerCamelCase : Any = torch.cat([rewards, torch.zeros(1,1,device=__A )],dim=1 )
_lowerCamelCase : Optional[int] = torch.ones(1,states.shape[1] ).to(dtype=torch.long,device=states.device )
with torch.no_grad():
_lowerCamelCase : List[Any] = model(
states=__A,actions=__A,rewards=__A,returns_to_go=__A,timesteps=__A,attention_mask=__A,return_dict=__A,)
self.assertEqual(action_pred.shape,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1],expected_outputs[step],atol=1e-4 ) )
_lowerCamelCase : Any = ( # env.step(action)
torch.randn(1,1,config.state_dim ).to(device=__A,dtype=torch.floataa ),
1.0,
False,
{},
)
_lowerCamelCase : int = action_pred[0, -1]
_lowerCamelCase : List[Any] = torch.cat([states, state],dim=1 )
_lowerCamelCase : Dict = returns_to_go[0, -1] - reward
_lowerCamelCase : Any = torch.cat([returns_to_go, pred_return.reshape(1,1,1 )],dim=1 )
_lowerCamelCase : Dict = torch.cat(
[timesteps, torch.ones((1, 1),device=__A,dtype=torch.long ) * (step + 1)],dim=1 ) | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Tuple = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = ['YolosFeatureExtractor']
UpperCAmelCase_ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import copy
import re
class UpperCAmelCase__ :
lowerCAmelCase_ = 'hp'
lowerCAmelCase_ = {}
lowerCAmelCase_ = None
@classmethod
def lowerCamelCase_ ( cls : Any,__A : Tuple,__A : Dict ):
_lowerCamelCase : Optional[Any] = prefix
_lowerCamelCase : int = defaults
cls.build_naming_info()
@staticmethod
def lowerCamelCase_ ( __A : List[str],__A : Tuple ):
if len(__A ) == 0:
return ""
_lowerCamelCase : Any = None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1,len(__A ) + 1 ):
_lowerCamelCase : List[str] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_lowerCamelCase : Optional[int] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__A : Tuple ):
_lowerCamelCase : Dict = ""
while integer != 0:
_lowerCamelCase : Optional[int] = chr(ord("A" ) + integer % 1_0 ) + s
integer //= 1_0
return s
_lowerCamelCase : Any = 0
while True:
_lowerCamelCase : Optional[int] = word + "#" + int_to_alphabetic(__A )
if sword in info["reverse_short_word"]:
continue
else:
_lowerCamelCase : Dict = sword
break
_lowerCamelCase : Dict = short_word
_lowerCamelCase : List[Any] = word
return short_word
@staticmethod
def lowerCamelCase_ ( __A : str,__A : List[str] ):
_lowerCamelCase : Tuple = param_name.split("_" )
_lowerCamelCase : List[Any] = [TrialShortNamer.shortname_for_word(__A,__A ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_lowerCamelCase : List[str] = ["", "_"]
for separator in separators:
_lowerCamelCase : int = separator.join(__A )
if shortname not in info["reverse_short_param"]:
_lowerCamelCase : List[Any] = shortname
_lowerCamelCase : Any = param_name
return shortname
return param_name
@staticmethod
def lowerCamelCase_ ( __A : Tuple,__A : Dict ):
_lowerCamelCase : Optional[int] = TrialShortNamer.shortname_for_key(__A,__A )
_lowerCamelCase : Dict = short_name
_lowerCamelCase : List[Any] = param_name
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] ):
if cls.NAMING_INFO is not None:
return
_lowerCamelCase : Dict = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
_lowerCamelCase : List[Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__A,__A )
_lowerCamelCase : Dict = info
@classmethod
def lowerCamelCase_ ( cls : List[str],__A : Any ):
cls.build_naming_info()
assert cls.PREFIX is not None
_lowerCamelCase : str = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_lowerCamelCase : Tuple = cls.NAMING_INFO["short_param"][k]
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = 1 if v else 0
_lowerCamelCase : Any = "" if isinstance(__A,(int, float) ) else "-"
_lowerCamelCase : int = f'{key}{sep}{v}'
name.append(__A )
return "_".join(__A )
@classmethod
def lowerCamelCase_ ( cls : Tuple,__A : Optional[Any] ):
_lowerCamelCase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_lowerCamelCase : int = []
else:
_lowerCamelCase : List[str] = repr.split("_" )
_lowerCamelCase : Union[str, Any] = {}
for value in values:
if "-" in value:
_lowerCamelCase : Optional[Any] = value.split("-" )
else:
_lowerCamelCase : Optional[int] = re.sub("[0-9.]","",__A )
_lowerCamelCase : Optional[Any] = float(re.sub("[^0-9.]","",__A ) )
_lowerCamelCase : Tuple = cls.NAMING_INFO["reverse_short_param"][p_k]
_lowerCamelCase : Any = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_lowerCamelCase : Any = cls.DEFAULTS[k]
return parameters | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['prompt']
lowerCAmelCase_ = ['prompt', 'negative_prompt']
lowerCAmelCase_ = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
lowerCAmelCase_ = False
@property
def lowerCamelCase_ ( self : List[Any] ):
return 3_2
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return 3_2
@property
def lowerCamelCase_ ( self : Optional[int] ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Tuple ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : Optional[int] ):
return 1_0_0
@property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,)
return CLIPTextModelWithProjection(__A )
@property
def lowerCamelCase_ ( self : Any ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_lowerCamelCase : Any = PriorTransformer(**__A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_lowerCamelCase : Optional[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,image_size=2_2_4,projection_dim=self.text_embedder_hidden_size,intermediate_size=3_7,num_attention_heads=4,num_channels=3,num_hidden_layers=5,patch_size=1_4,)
_lowerCamelCase : Any = CLIPVisionModelWithProjection(__A )
return model
@property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = CLIPImageProcessor(
crop_size=2_2_4,do_center_crop=__A,do_normalize=__A,do_resize=__A,image_mean=[0.48145466, 0.4578275, 0.40821073],image_std=[0.26862954, 0.26130258, 0.27577711],resample=3,size=2_2_4,)
return image_processor
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = self.dummy_prior
_lowerCamelCase : Any = self.dummy_image_encoder
_lowerCamelCase : Any = self.dummy_text_encoder
_lowerCamelCase : List[str] = self.dummy_tokenizer
_lowerCamelCase : List[str] = self.dummy_image_processor
_lowerCamelCase : Optional[Any] = UnCLIPScheduler(
variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1_0_0_0,clip_sample=__A,clip_sample_range=10.0,)
_lowerCamelCase : Optional[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCamelCase_ ( self : int,__A : List[Any],__A : Tuple=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : Any = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Tuple = "cpu"
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : int = self.pipeline_class(**__A )
_lowerCamelCase : str = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(__A ) )
_lowerCamelCase : Any = output.image_embeds
_lowerCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__A ),return_dict=__A,)[0]
_lowerCamelCase : Any = image[0, -1_0:]
_lowerCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_lowerCamelCase : str = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = torch_device == "cpu"
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=__A,relax_max_difference=__A,test_mean_pixel_difference=__A,)
@skip_mps
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = torch_device == "cpu"
_lowerCamelCase : Any = False
self._test_attention_slicing_forward_pass(
test_max_difference=__A,test_mean_pixel_difference=__A,) | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
    print(merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even)))
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'AutoTokenizer'
lowerCAmelCase_ = ['tokenizer']
lowerCAmelCase_ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : str,__A : int,__A : int=None ):
super().__init__(__A )
_lowerCamelCase : Optional[Any] = speaker_embeddings
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : List[Any],__A : Optional[Any]="speaker_embeddings_path.json",**__A : Union[str, Any] ):
if speaker_embeddings_dict_path is not None:
_lowerCamelCase : int = get_file_from_repo(
__A,__A,subfolder=kwargs.pop("subfolder",__A ),cache_dir=kwargs.pop("cache_dir",__A ),force_download=kwargs.pop("force_download",__A ),proxies=kwargs.pop("proxies",__A ),resume_download=kwargs.pop("resume_download",__A ),local_files_only=kwargs.pop("local_files_only",__A ),use_auth_token=kwargs.pop("use_auth_token",__A ),revision=kwargs.pop("revision",__A ),)
if speaker_embeddings_path is None:
logger.warning(
f'`{os.path.join(__A,__A )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_lowerCamelCase : Dict = None
else:
with open(__A ) as speaker_embeddings_json:
_lowerCamelCase : Union[str, Any] = json.load(__A )
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(__A,**__A )
return cls(tokenizer=__A,speaker_embeddings=__A )
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],__A : Optional[Any]="speaker_embeddings_path.json",__A : int="speaker_embeddings",__A : bool = False,**__A : Tuple,):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__A,__A,"v2" ),exist_ok=__A )
_lowerCamelCase : Dict = {}
_lowerCamelCase : Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Union[str, Any] = self._load_voice_preset(__A )
_lowerCamelCase : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"],__A,f'{prompt_key}_{key}' ),voice_preset[key],allow_pickle=__A,)
_lowerCamelCase : List[str] = os.path.join(__A,f'{prompt_key}_{key}.npy' )
_lowerCamelCase : Any = tmp_dict
with open(os.path.join(__A,__A ),"w" ) as fp:
json.dump(__A,__A )
super().save_pretrained(__A,__A,**__A )
def lowerCamelCase_ ( self : Any,__A : str = None,**__A : Optional[int] ):
_lowerCamelCase : Optional[int] = self.speaker_embeddings[voice_preset]
_lowerCamelCase : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_lowerCamelCase : List[str] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path","/" ),voice_preset_paths[key],subfolder=kwargs.pop("subfolder",__A ),cache_dir=kwargs.pop("cache_dir",__A ),force_download=kwargs.pop("force_download",__A ),proxies=kwargs.pop("proxies",__A ),resume_download=kwargs.pop("resume_download",__A ),local_files_only=kwargs.pop("local_files_only",__A ),use_auth_token=kwargs.pop("use_auth_token",__A ),revision=kwargs.pop("revision",__A ),)
if path is None:
raise ValueError(
f'`{os.path.join(self.speaker_embeddings.get("repo_or_path","/" ),voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
_lowerCamelCase : int = np.load(__A )
return voice_preset_dict
def lowerCamelCase_ ( self : Any,__A : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key],np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Tuple,__A : List[Any]=None,__A : Optional[int]=None,__A : str="pt",__A : Tuple=2_5_6,__A : Dict=False,__A : List[Any]=True,__A : Dict=False,**__A : Tuple,):
if voice_preset is not None and not isinstance(__A,__A ):
if (
isinstance(__A,__A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCamelCase : Optional[Any] = self._load_voice_preset(__A )
else:
if isinstance(__A,__A ) and not voice_preset.endswith(".npz" ):
_lowerCamelCase : List[str] = voice_preset + ".npz"
_lowerCamelCase : List[Any] = np.load(__A )
if voice_preset is not None:
self._validate_voice_preset_dict(__A,**__A )
_lowerCamelCase : List[str] = BatchFeature(data=__A,tensor_type=__A )
_lowerCamelCase : Dict = self.tokenizer(
__A,return_tensors=__A,padding="max_length",max_length=__A,return_attention_mask=__A,return_token_type_ids=__A,add_special_tokens=__A,**__A,)
if voice_preset is not None:
_lowerCamelCase : Any = voice_preset
return encoded_text | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
UpperCAmelCase_ : List[str] = 1.0_5457_1817E-34 # unit of ℏ : J * s
UpperCAmelCase_ : Any = 3E8 # unit of c : m * s^-1
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
_lowerCamelCase : List[str] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowerCamelCase : Dict = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowerCamelCase : Union[str, Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase_ : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ):
"""simple docstring"""
return max(metric_fn(_lowerCAmelCase , _lowerCAmelCase ) for gt in ground_truths )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Union[str, Any] = []
if args.gold_data_mode == "qa":
_lowerCamelCase : Optional[Any] = pd.read_csv(_lowerCAmelCase , sep="\t" , header=_lowerCAmelCase )
for answer_list in data[1]:
_lowerCamelCase : Union[str, Any] = ast.literal_eval(_lowerCAmelCase )
answers.append(_lowerCAmelCase )
else:
_lowerCamelCase : str = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : List[Any] = [[reference] for reference in references]
_lowerCamelCase : Union[str, Any] = 0
for prediction, ground_truths in zip(_lowerCAmelCase , _lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
fa += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Dict = 100.0 * em / total
_lowerCamelCase : Dict = 100.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : int = args.k
_lowerCamelCase : Optional[Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Union[str, Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Optional[Any] = 0
for hypo, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = set(hypo.split("\t" )[:k] )
_lowerCamelCase : List[Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCamelCase : List[Any] = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
def strip_title(_lowerCAmelCase : List[Any] ):
if title.startswith("\"" ):
_lowerCamelCase : Dict = title[1:]
if title.endswith("\"" ):
_lowerCamelCase : List[str] = title[:-1]
return title
_lowerCamelCase : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , )["input_ids"].to(args.device )
_lowerCamelCase : Any = rag_model.rag.question_encoder(_lowerCAmelCase )
_lowerCamelCase : str = question_enc_outputs[0]
_lowerCamelCase : List[str] = rag_model.retriever(
_lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
_lowerCamelCase : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCamelCase : str = []
for docs in all_docs:
_lowerCamelCase : Union[str, Any] = [strip_title(_lowerCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(_lowerCAmelCase ) )
return provenance_strings
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = inputs_dict.input_ids.to(args.device )
_lowerCamelCase : Any = inputs_dict.attention_mask.to(args.device )
_lowerCamelCase : Dict = rag_model.generate( # rag_model overwrites generate
_lowerCAmelCase , attention_mask=_lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCamelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
if args.print_predictions:
for q, a in zip(_lowerCAmelCase , _lowerCAmelCase ):
logger.info("Q: {} - A: {}".format(_lowerCAmelCase , _lowerCAmelCase ) )
return answers
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=_lowerCAmelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=_lowerCAmelCase , choices=["exact", "compressed", "legacy"] , type=_lowerCAmelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=_lowerCAmelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=_lowerCAmelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=_lowerCAmelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=_lowerCAmelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=_lowerCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=_lowerCAmelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=_lowerCAmelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=_lowerCAmelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=_lowerCAmelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
_lowerCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
if args.model_type is None:
_lowerCamelCase : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
_lowerCamelCase : Any = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
_lowerCamelCase : Dict = args.n_docs
if args.index_name is not None:
_lowerCamelCase : List[Any] = args.index_name
if args.index_path is not None:
_lowerCamelCase : Union[str, Any] = args.index_path
else:
_lowerCamelCase : Optional[Any] = BartForConditionalGeneration
_lowerCamelCase : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , _lowerCAmelCase )
_lowerCamelCase : str = get_scores if args.eval_mode == "e2e" else get_precision_at_k
_lowerCamelCase : str = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(_lowerCAmelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
_lowerCamelCase : List[Any] = RagRetriever.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model_class.from_pretrained(_lowerCAmelCase , retriever=_lowerCAmelCase , **_lowerCAmelCase )
model.retriever.init_retrieval()
else:
_lowerCamelCase : str = model_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
_lowerCamelCase : Optional[Any] = []
for line in tqdm(_lowerCAmelCase ):
questions.append(line.strip() )
if len(_lowerCAmelCase ) == args.eval_batch_size:
_lowerCamelCase : Optional[Any] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) + "\n" )
preds_file.flush()
_lowerCamelCase : str = []
if len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Optional[int] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) )
preds_file.flush()
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase_ : Any = get_args()
main(args) | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = GPTSwaTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : int = GPTSwaTokenizer(__A,eos_token="<unk>",bos_token="<unk>",pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any],__A : Optional[int] ):
_lowerCamelCase : Tuple = "This is a test"
_lowerCamelCase : Any = "This is a test"
return input_text, output_text
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = "<s>"
_lowerCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ),__A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ),__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<unk>" )
self.assertEqual(vocab_keys[1],"<s>" )
self.assertEqual(vocab_keys[-1],"j" )
self.assertEqual(len(__A ),2_0_0_0 )
def lowerCamelCase_ ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size,2_0_0_0 )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = GPTSwaTokenizer(__A )
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
_lowerCamelCase : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
__A,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],)
# fmt: on
_lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A,[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],)
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__A )
# fmt: off
self.assertListEqual(
__A,["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[int] = GPTSwaTokenizer(__A )
_lowerCamelCase : str = ["This is a test", "I was born in 92000, and this is falsé."]
_lowerCamelCase : Tuple = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__A,__A ):
self.assertListEqual(tokenizer.encode_fast(__A ),__A )
# Test that decode_fast returns the input text
for text, token_ids in zip(__A,__A ):
self.assertEqual(tokenizer.decode_fast(__A ),__A )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
_lowerCamelCase : List[Any] = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A,model_name="AI-Sweden/gpt-sw3-126m",sequences=__A,) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
def gcd( a: int, b: int ) -> int:
    """simple docstring"""
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse( a: int, m: int ) -> int:
    """simple docstring"""
    if gcd(a, m ) != 1:
        msg = F'mod inverse of {a!r} and {m!r} does not exist'
        raise ValueError(msg )
    # extended Euclidean algorithm over the triples (ua, ub, uc) and (va, vb, vc)
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
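# Illustrative sanity check (not part of the original snippet): 7 * 8 % 11 == 1,
# so the modular inverse of 7 modulo 11 should be 8.
if __name__ == "__main__":
    assert gcd(3, 5 ) == 1
    assert find_mod_inverse(7, 11 ) == 8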
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self, ints: Iterable[int] ) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True ):
            self.head = Node(i, self.head )
    def __iter__( self ) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __str__( self ) -> str:
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one: SortedLinkedList, sll_two: SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    # concatenating two already-sorted inputs and re-sorting in the constructor keeps the result sorted
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] = BlipImageProcessor()
_lowerCamelCase : Union[str, Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
_lowerCamelCase : List[str] = BlipaProcessor(__A,__A )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any],**__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__A ).tokenizer
def lowerCamelCase_ ( self : Tuple,**__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__A ).image_processor
def lowerCamelCase_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )]
_lowerCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(__A,0,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer(),image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Dict = self.get_tokenizer(bos_token="(BOS)",eos_token="(EOS)" )
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=__A,padding_value=1.0 )
_lowerCamelCase : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname,bos_token="(BOS)",eos_token="(EOS)",do_normalize=__A,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer,__A )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCamelCase : Tuple = image_processor(__A,return_tensors="np" )
_lowerCamelCase : int = processor(images=__A,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1e-2 )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Tuple = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Dict = "lower newer"
_lowerCamelCase : Optional[Any] = processor(text=__A )
_lowerCamelCase : Any = tokenizer(__A,return_token_type_ids=__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key],encoded_processor[key] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : int = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : int = self.prepare_image_inputs()
_lowerCamelCase : Any = processor(text=__A,images=__A )
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Tuple = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Tuple = processor.batch_decode(__A )
_lowerCamelCase : Any = tokenizer.batch_decode(__A )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : str = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : List[Any] = "lower newer"
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = processor(text=__A,images=__A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] ) | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def binary_search( a_list: list[int], item: int ) -> bool:
    """simple docstring"""
    if len(a_list ) == 0:
        return False
    midpoint: int = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item )
    else:
        return binary_search(a_list[midpoint + 1 :], item )
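# Illustrative checks (not in the original snippet); the input list must already be sorted.
assert binary_search([1, 3, 5, 7, 9], 5 )
assert not binary_search([1, 3, 5, 7, 9], 4 )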
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
print(f'''{target} was {not_str}found in {sequence}''') | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor', 'tokenizer']
lowerCAmelCase_ = 'OwlViTImageProcessor'
lowerCAmelCase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any],__A : str=None,__A : Optional[int]=None,**__A : Dict ):
_lowerCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",__A,)
_lowerCamelCase : Union[str, Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A,__A )
def __call__( self : Any,__A : Tuple=None,__A : Optional[Any]=None,__A : Optional[int]=None,__A : Union[str, Any]="max_length",__A : Dict="np",**__A : int ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__A,__A ) or (isinstance(__A,__A ) and not isinstance(text[0],__A )):
_lowerCamelCase : Tuple = [self.tokenizer(__A,padding=__A,return_tensors=__A,**__A )]
elif isinstance(__A,__A ) and isinstance(text[0],__A ):
_lowerCamelCase : Dict = []
# Maximum number of queries across batch
_lowerCamelCase : Optional[int] = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
_lowerCamelCase : Optional[Any] = t + [" "] * (max_num_queries - len(__A ))
_lowerCamelCase : Union[str, Any] = self.tokenizer(__A,padding=__A,return_tensors=__A,**__A )
encodings.append(__A )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_lowerCamelCase : int = np.concatenate([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : List[str] = np.concatenate([encoding["attention_mask"] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_lowerCamelCase : str = jnp.concatenate([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : List[str] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_lowerCamelCase : Tuple = torch.cat([encoding["input_ids"] for encoding in encodings],dim=0 )
_lowerCamelCase : List[str] = torch.cat([encoding["attention_mask"] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_lowerCamelCase : str = tf.stack([encoding["input_ids"] for encoding in encodings],axis=0 )
_lowerCamelCase : str = tf.stack([encoding["attention_mask"] for encoding in encodings],axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_lowerCamelCase : Tuple = BatchEncoding()
_lowerCamelCase : List[Any] = input_ids
_lowerCamelCase : Dict = attention_mask
if query_images is not None:
_lowerCamelCase : Tuple = BatchEncoding()
_lowerCamelCase : Optional[Any] = self.image_processor(
__A,return_tensors=__A,**__A ).pixel_values
_lowerCamelCase : Tuple = query_pixel_values
if images is not None:
_lowerCamelCase : Tuple = self.image_processor(__A,return_tensors=__A,**__A )
if text is not None and images is not None:
_lowerCamelCase : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_lowerCamelCase : str = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ),tensor_type=__A )
def lowerCamelCase_ ( self : List[str],*__A : int,**__A : Tuple ):
return self.image_processor.post_process(*__A,**__A )
def lowerCamelCase_ ( self : Union[str, Any],*__A : List[str],**__A : List[str] ):
return self.image_processor.post_process_object_detection(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : int,**__A : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__A,**__A )
def lowerCamelCase_ ( self : Tuple,*__A : List[Any],**__A : Union[str, Any] ):
return self.tokenizer.batch_decode(*__A,**__A )
def lowerCamelCase_ ( self : Tuple,*__A : Tuple,**__A : Dict ):
return self.tokenizer.decode(*__A,**__A )
@property
def lowerCamelCase_ ( self : Dict ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",__A,)
return self.image_processor_class
@property
def lowerCamelCase_ ( self : Any ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",__A,)
return self.image_processor | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
lowerCAmelCase_ = PegasusConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = 'gelu'
def __init__( self : str,__A : Tuple,__A : Optional[Any]=1_3,__A : Tuple=7,__A : List[str]=True,__A : Any=False,__A : Tuple=9_9,__A : List[Any]=3_2,__A : Union[str, Any]=2,__A : Dict=4,__A : int=3_7,__A : Optional[Any]=0.1,__A : Optional[Any]=0.1,__A : Optional[int]=4_0,__A : List[str]=2,__A : List[str]=1,__A : List[Any]=0,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[str] = eos_token_id
_lowerCamelCase : List[str] = pad_token_id
_lowerCamelCase : Union[str, Any] = bos_token_id
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size )
_lowerCamelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ),1 )
_lowerCamelCase : Any = tf.concat([input_ids, eos_tensor],axis=1 )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,**self.config_updates,)
_lowerCamelCase : str = prepare_pegasus_inputs_dict(__A,__A,__A )
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any],__A : int,__A : int ):
_lowerCamelCase : Any = TFPegasusModel(config=__A ).get_decoder()
_lowerCamelCase : List[str] = inputs_dict["input_ids"]
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Any = inputs_dict["attention_mask"][:1, :]
_lowerCamelCase : Tuple = inputs_dict["head_mask"]
_lowerCamelCase : Optional[int] = 1
# first forward pass
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A,head_mask=__A,use_cache=__A )
_lowerCamelCase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : int = ids_tensor((self.batch_size, 3),config.vocab_size )
_lowerCamelCase : int = tf.cast(ids_tensor((self.batch_size, 3),2 ),tf.inta )
# append to next input_ids and
_lowerCamelCase : List[str] = tf.concat([input_ids, next_tokens],axis=-1 )
_lowerCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask],axis=-1 )
_lowerCamelCase : Dict = model(__A,attention_mask=__A )[0]
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1],output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Optional[int] = int(ids_tensor((1,),output_from_past.shape[-1] ) )
_lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A,__A,rtol=1e-3 )
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[str]=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCamelCase : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = TFPegasusModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=__A )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
lowerCAmelCase_ = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase_ = 'google/pegasus-xsum'
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self : List[Any],**__A : Dict ):
_lowerCamelCase : Union[str, Any] = self.translate_src_text(**__A )
assert self.expected_text == generated_words
def lowerCamelCase_ ( self : str,**__A : Dict ):
_lowerCamelCase : Dict = self.tokenizer(self.src_text,**__A,padding=__A,return_tensors="tf" )
_lowerCamelCase : Any = self.model.generate(
model_inputs.input_ids,attention_mask=model_inputs.attention_mask,num_beams=2,use_cache=__A,)
_lowerCamelCase : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(),skip_special_tokens=__A )
return generated_words
@slow
def lowerCamelCase_ ( self : int ):
self._assert_generated_batch_equal_expected() | 700 |
'''simple docstring'''
from math import sqrt
def solution( limit: int = 1000000 ) -> int:
    """simple docstring"""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            # the shortest path over the cuboid surface is sqrt((a + b)^2 + M^2),
            # where a + b = sum_shortest_sides and M = max_cuboid_size
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def A_ ( _lowerCAmelCase : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
_lowerCamelCase : List[str] = nums[0]
for i in range(1 , len(_lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = nums[i]
        _lowerCamelCase : Tuple = max(ans , ans + num , num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : Union[str, Any] = int(input('Enter number of elements : ').strip())
UpperCAmelCase_ : Tuple = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array)) | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(_lowerCAmelCase , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor']
lowerCAmelCase_ = 'SamImageProcessor'
def __init__( self : Any,__A : Optional[int] ):
super().__init__(__A )
_lowerCamelCase : Union[str, Any] = self.image_processor
_lowerCamelCase : Optional[int] = -1_0
_lowerCamelCase : List[str] = self.image_processor.size["longest_edge"]
def __call__( self : Any,__A : Tuple=None,__A : Any=None,__A : Optional[Any]=None,__A : Dict=None,__A : Optional[Union[str, TensorType]] = None,**__A : str,):
_lowerCamelCase : str = self.image_processor(
__A,return_tensors=__A,**__A,)
        # pop arguments that are not used in the forward pass but are still needed afterwards
_lowerCamelCase : List[Any] = encoding_image_processor["original_sizes"]
if hasattr(__A,"numpy" ): # Checks if Torch or TF tensor
_lowerCamelCase : Dict = original_sizes.numpy()
_lowerCamelCase : Union[str, Any] = self._check_and_preprocess_points(
input_points=__A,input_labels=__A,input_boxes=__A,)
_lowerCamelCase : Union[str, Any] = self._normalize_and_convert(
__A,__A,input_points=__A,input_labels=__A,input_boxes=__A,return_tensors=__A,)
return encoding_image_processor
def lowerCamelCase_ ( self : List[str],__A : List[str],__A : Union[str, Any],__A : List[str]=None,__A : int=None,__A : Any=None,__A : int="pt",):
if input_points is not None:
if len(__A ) != len(__A ):
_lowerCamelCase : Tuple = [
self._normalize_coordinates(self.target_size,__A,original_sizes[0] ) for point in input_points
]
else:
_lowerCamelCase : Dict = [
self._normalize_coordinates(self.target_size,__A,__A )
for point, original_size in zip(__A,__A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_lowerCamelCase : Dict = self._pad_points_and_labels(__A,__A )
_lowerCamelCase : Optional[int] = np.array(__A )
if input_labels is not None:
_lowerCamelCase : List[Any] = np.array(__A )
if input_boxes is not None:
if len(__A ) != len(__A ):
_lowerCamelCase : Optional[Any] = [
self._normalize_coordinates(self.target_size,__A,original_sizes[0],is_bounding_box=__A )
for box in input_boxes
]
else:
_lowerCamelCase : List[str] = [
self._normalize_coordinates(self.target_size,__A,__A,is_bounding_box=__A )
for box, original_size in zip(__A,__A )
]
_lowerCamelCase : Dict = np.array(__A )
if input_boxes is not None:
if return_tensors == "pt":
_lowerCamelCase : str = torch.from_numpy(__A )
# boxes batch size of 1 by default
_lowerCamelCase : int = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_lowerCamelCase : str = tf.convert_to_tensor(__A )
# boxes batch size of 1 by default
_lowerCamelCase : Optional[Any] = tf.expand_dims(__A,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_lowerCamelCase : Dict = torch.from_numpy(__A )
# point batch size of 1 by default
_lowerCamelCase : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_lowerCamelCase : Any = tf.convert_to_tensor(__A )
# point batch size of 1 by default
_lowerCamelCase : Any = tf.expand_dims(__A,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_lowerCamelCase : Optional[int] = torch.from_numpy(__A )
# point batch size of 1 by default
_lowerCamelCase : List[str] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_lowerCamelCase : Dict = tf.convert_to_tensor(__A )
# point batch size of 1 by default
_lowerCamelCase : Tuple = tf.expand_dims(__A,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def lowerCamelCase_ ( self : List[str],__A : Dict,__A : str ):
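        # Pad every prompt to the same number of points, filling the extra slots with the
        # pad value and marking the corresponding labels as padded.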
_lowerCamelCase : Optional[Any] = max([point.shape[0] for point in input_points] )
_lowerCamelCase : Dict = []
for i, point in enumerate(__A ):
if point.shape[0] != expected_nb_points:
_lowerCamelCase : Tuple = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
_lowerCamelCase : Dict = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(__A )
_lowerCamelCase : Tuple = processed_input_points
return input_points, input_labels
def lowerCamelCase_ ( self : int,__A : int,__A : np.ndarray,__A : Union[str, Any],__A : Optional[int]=False ):
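        # Rescale (x, y) coordinates from the original image size to the size produced by
        # the image processor (longest edge fixed); boxes are handled as two corner points.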
_lowerCamelCase : List[str] = original_size
_lowerCamelCase : Union[str, Any] = self.image_processor._get_preprocess_shape(__A,longest_edge=__A )
_lowerCamelCase : str = deepcopy(__A ).astype(__A )
if is_bounding_box:
_lowerCamelCase : Dict = coords.reshape(-1,2,2 )
_lowerCamelCase : int = coords[..., 0] * (new_w / old_w)
_lowerCamelCase : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_lowerCamelCase : List[Any] = coords.reshape(-1,4 )
return coords
def lowerCamelCase_ ( self : List[str],__A : str=None,__A : Tuple=None,__A : List[Any]=None,):
if input_points is not None:
if hasattr(__A,"numpy" ): # Checks for TF or Torch tensor
_lowerCamelCase : Optional[int] = input_points.numpy().tolist()
if not isinstance(__A,__A ) or not isinstance(input_points[0],__A ):
raise ValueError("Input points must be a list of list of floating points." )
_lowerCamelCase : str = [np.array(__A ) for input_point in input_points]
else:
_lowerCamelCase : List[str] = None
if input_labels is not None:
if hasattr(__A,"numpy" ):
_lowerCamelCase : Any = input_labels.numpy().tolist()
if not isinstance(__A,__A ) or not isinstance(input_labels[0],__A ):
raise ValueError("Input labels must be a list of list integers." )
_lowerCamelCase : str = [np.array(__A ) for label in input_labels]
else:
_lowerCamelCase : int = None
if input_boxes is not None:
if hasattr(__A,"numpy" ):
_lowerCamelCase : str = input_boxes.numpy().tolist()
if (
not isinstance(__A,__A )
or not isinstance(input_boxes[0],__A )
or not isinstance(input_boxes[0][0],__A )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_lowerCamelCase : str = [np.array(__A ).astype(np.floataa ) for box in input_boxes]
else:
_lowerCamelCase : str = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(__A ) )
def lowerCamelCase_ ( self : List[str],*__A : int,**__A : Union[str, Any] ):
return self.image_processor.post_process_masks(*__A,**__A ) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
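        # drop the CLS embedding and keep only the patch tokens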
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=_lowerCAmelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=_lowerCAmelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=_lowerCAmelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=_lowerCAmelCase , default=0 , help="cuda_id." , )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
return args
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not len(_lowerCAmelCase ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
_lowerCamelCase : Tuple = imgs[0].size
_lowerCamelCase : List[str] = Image.new("RGB" , size=(cols * w, rows * h) )
_lowerCamelCase : List[Any] = grid.size
for i, img in enumerate(_lowerCAmelCase ):
grid.paste(_lowerCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any]="robotic cat with wings" , _lowerCAmelCase : int=7.5 , _lowerCAmelCase : Optional[Any]=50 , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Dict=42 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.Generator(pipeline.device ).manual_seed(_lowerCAmelCase )
_lowerCamelCase : List[Any] = pipeline(
_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , ).images
_lowerCamelCase : List[str] = int(math.sqrt(_lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = image_grid(_lowerCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
UpperCAmelCase_ : Any = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
UpperCAmelCase_ : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
UpperCAmelCase_ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
UpperCAmelCase_ : Tuple = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
UpperCAmelCase_ : Optional[int] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
UpperCAmelCase_ : List[str] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
UpperCAmelCase_ : str = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
UpperCAmelCase_ : Optional[int] = unet.to(torch.device('cuda', args.cuda_id))
UpperCAmelCase_ : Any = pipeline.to(unet.device)
UpperCAmelCase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
UpperCAmelCase_ : Union[str, Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1))) | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModel)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Any = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ : List[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ : int = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Dict = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
) | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
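        # Blenderbot Small does not use token type ids, so a mask of zeros is returned.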
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
assert column_title.isupper()
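    # Interpret the title as a base-26 number ('A' -> 1, ..., 'Z' -> 26), scanning from
    # the rightmost character and weighting each position by an increasing power of 26.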
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Union[str, Any] = len(_lowerCAmelCase ) - 1
_lowerCamelCase : List[Any] = 0
while index >= 0:
        _lowerCamelCase : List[str] = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
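        # Warm-up schedule: either 1 - (1 + step / inv_gamma) ** -power or the default
        # (1 + step) / (10 + step), clamped to the [min_decay, decay] range.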
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
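        # Each shadow parameter follows s <- s - (1 - decay) * (s - param), i.e. an
        # exponential moving average of the live parameters.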
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Tuple,**__A : Optional[int] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase : Tuple = deprecated_arg[3:]
_lowerCamelCase : Tuple = not kwargs.pop(__A )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
_lowerCamelCase : Dict = kwargs.pop("tpu_name",self.tpu_name )
_lowerCamelCase : List[Any] = kwargs.pop("device_idx",self.device_idx )
_lowerCamelCase : Optional[int] = kwargs.pop("eager_mode",self.eager_mode )
_lowerCamelCase : Optional[Any] = kwargs.pop("use_xla",self.use_xla )
super().__init__(**__A )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Name of TPU'} , )
lowerCAmelCase_ = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Benchmark models in eager model.'} )
lowerCAmelCase_ = field(
default=A , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def lowerCamelCase_ ( self : List[str] ):
requires_backends(self,["tf"] )
_lowerCamelCase : Any = None
if self.tpu:
try:
if self.tpu_name:
_lowerCamelCase : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCamelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCamelCase : Tuple = None
return tpu
@cached_property
def lowerCamelCase_ ( self : Any ):
requires_backends(self,["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCamelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx],"GPU" )
_lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([],"GPU" ) # disable GPU
_lowerCamelCase : Optional[int] = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def lowerCamelCase_ ( self : str ):
requires_backends(self,["tf"] )
return self._setup_tpu is not None
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
return self._setup_strategy
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def lowerCamelCase_ ( self : Dict ):
requires_backends(self,["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self.n_gpu > 0 | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
UpperCAmelCase_ : int = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Optional[Any],__A : Optional[Any]=None ):
_lowerCamelCase : List[Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self,__A,getattr(__A,__A ) )
_lowerCamelCase : List[str] = module._original_module if isinstance(__A,_PatchedModuleObj ) else module
class UpperCAmelCase__ :
lowerCAmelCase_ = []
def __init__( self : Any,__A : int,__A : str,__A : List[str],__A : Optional[Any]=None ):
_lowerCamelCase : Union[str, Any] = obj
_lowerCamelCase : List[Any] = target
_lowerCamelCase : Any = new
_lowerCamelCase : Optional[int] = target.split("." )[0]
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Any = attrs or []
def __enter__( self : Optional[Any] ):
_lowerCamelCase : Tuple = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__A ) ):
try:
_lowerCamelCase : List[Any] = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_lowerCamelCase : str = getattr(self.obj,__A )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__A,_PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_lowerCamelCase : Optional[int] = obj_attr
# patch at top level
setattr(self.obj,__A,_PatchedModuleObj(__A,attrs=self.attrs ) )
_lowerCamelCase : Dict = getattr(self.obj,__A )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__A,__A,_PatchedModuleObj(getattr(__A,__A,__A ),attrs=self.attrs ) )
_lowerCamelCase : Tuple = getattr(__A,__A )
# finally set the target attribute
setattr(__A,__A,self.new )
# Patch attribute itself:
        # it's used for builtins like "open";
        # to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_lowerCamelCase : List[Any] = getattr(import_module(".".join(__A ) ),__A )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj,__A ) is attr_value:
_lowerCamelCase : int = getattr(self.obj,__A )
setattr(self.obj,__A,self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
_lowerCamelCase : Any = globals()["__builtins__"][target_attr]
setattr(self.obj,__A,self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : Dict,*__A : int ):
for attr in list(self.original ):
setattr(self.obj,__A,self.original.pop(__A ) )
def lowerCamelCase_ ( self : Optional[int] ):
self.__enter__()
self._active_patches.append(self )
def lowerCamelCase_ ( self : int ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
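    # Load the original GroupViT checkpoint and remap its parameter names to the Hugging Face layout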
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |